From 6b30977d9e335597b1eef993e0a014e6d35f6c7c Mon Sep 17 00:00:00 2001 From: Faridun Mirzoev Date: Tue, 24 Mar 2026 16:47:46 -0400 Subject: [PATCH 1/2] feat(ag2): add AG2 framework backend integration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Integrates AG2 (community fork of AutoGen, PyPI package: ag2) as a new framework option alongside praisonai, crewai, and autogen. Changes: - pyproject.toml: add [ag2] optional dependency extra (ag2>=0.11.0) - agents_generator.py: AG2 detection via importlib.metadata + _run_ag2() with LLMConfig dict pattern, GroupChat orchestration, Bedrock support, ChatResult.summary extraction, and TERMINATE cleanup - auto.py: AG2_AVAILABLE flag + validation in AutoGenerator.__init__ - .env.example: add AG2 and AWS Bedrock environment variable templates - examples/ag2/: basic, multi-agent, and Bedrock YAML examples - tests: 16 unit tests + 9 mock integration tests (25/25 passing) Detection uses importlib.metadata.distribution('ag2') + LLMConfig check. All changes are purely additive — existing code paths unaffected. E2E tested against OpenAI gpt-4o-mini: single-agent and multi-agent GroupChat flows both produce correct output. 
--- examples/ag2/ag2_basic.yaml | 30 + examples/ag2/ag2_bedrock.yaml | 42 ++ examples/ag2/ag2_multi_agent.yaml | 54 ++ src/praisonai/.env.example | 16 +- src/praisonai/praisonai/agents_generator.py | 146 ++++- src/praisonai/praisonai/auto.py | 15 + src/praisonai/pyproject.toml | 3 + .../tests/integration/ag2/__init__.py | 0 .../integration/ag2/test_ag2_integration.py | 477 +++++++++++++++ .../tests/source/ag2_function_tools.py | 98 ++++ src/praisonai/tests/unit/test_ag2_adapter.py | 542 ++++++++++++++++++ 11 files changed, 1418 insertions(+), 5 deletions(-) create mode 100644 examples/ag2/ag2_basic.yaml create mode 100644 examples/ag2/ag2_bedrock.yaml create mode 100644 examples/ag2/ag2_multi_agent.yaml create mode 100644 src/praisonai/tests/integration/ag2/__init__.py create mode 100644 src/praisonai/tests/integration/ag2/test_ag2_integration.py create mode 100644 src/praisonai/tests/source/ag2_function_tools.py create mode 100644 src/praisonai/tests/unit/test_ag2_adapter.py diff --git a/examples/ag2/ag2_basic.yaml b/examples/ag2/ag2_basic.yaml new file mode 100644 index 000000000..64d70d26b --- /dev/null +++ b/examples/ag2/ag2_basic.yaml @@ -0,0 +1,30 @@ +framework: ag2 +topic: "Research the latest developments in AI agents" + +# Install: pip install "praisonai[ag2]" +# Run: praisonai --framework ag2 examples/ag2/ag2_basic.yaml +# or praisonai run examples/ag2/ag2_basic.yaml --framework ag2 + +roles: + research_agent: + role: "AI Research Specialist" + goal: "Research and summarise the latest developments in AI agent frameworks" + backstory: | + You are an experienced AI researcher with deep knowledge of multi-agent + systems, large language models, and the latest trends in AI tooling. + You excel at synthesising complex technical topics into clear summaries. + tasks: + research_task: + description: | + Research and summarise the latest developments in AI agent frameworks + for the topic: {topic} + + Focus on: + 1. 
Key frameworks and their unique capabilities + 2. Recent innovations and improvements + 3. Community adoption and ecosystem growth + 4. Practical use cases and success stories + expected_output: | + A concise research summary covering the key developments, + major frameworks, and practical insights. Include 3-5 bullet + points of the most important findings. diff --git a/examples/ag2/ag2_bedrock.yaml b/examples/ag2/ag2_bedrock.yaml new file mode 100644 index 000000000..2d501eeba --- /dev/null +++ b/examples/ag2/ag2_bedrock.yaml @@ -0,0 +1,42 @@ +framework: ag2 +topic: "Cloud-native AI deployment strategies on AWS" + +# AG2 exclusive feature: native AWS Bedrock support via LLMConfig(api_type="bedrock") +# +# Prerequisites: +# pip install "praisonai[ag2]" +# aws configure (or set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_DEFAULT_REGION) +# +# Run: +# praisonai --framework ag2 examples/ag2/ag2_bedrock.yaml +# +# The AG2 adapter detects api_type="bedrock" from the llm config and uses +# LLMConfig(api_type="bedrock", model=...) — no OPENAI_API_KEY required. +# AWS credentials are sourced from boto3 (env vars, ~/.aws/credentials, IAM role). + +roles: + cloud_architect: + role: "AWS Cloud Architect" + goal: "Design and explain cloud-native AI deployment strategies on AWS" + backstory: | + You are an AWS Solutions Architect specialising in AI/ML workloads. + You have deep expertise in Amazon Bedrock, SageMaker, ECS, and Lambda, + and you help organisations deploy AI agents at scale securely and cost-effectively. + llm: + model: "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0" + api_type: "bedrock" + aws_region: "us-east-1" + tasks: + architecture_task: + description: | + Design a cloud-native deployment strategy for AI agents on AWS for: {topic} + + Cover: + 1. Recommended AWS services (Bedrock, ECS, Lambda, etc.) + 2. Scalability and cost optimisation patterns + 3. Security and compliance considerations + 4. 
A simple reference architecture overview + expected_output: | + A concise architecture guide with service recommendations, + a high-level deployment diagram description, and key + best practices for production AI agent deployments on AWS. diff --git a/examples/ag2/ag2_multi_agent.yaml b/examples/ag2/ag2_multi_agent.yaml new file mode 100644 index 000000000..b3b90b015 --- /dev/null +++ b/examples/ag2/ag2_multi_agent.yaml @@ -0,0 +1,54 @@ +framework: ag2 +topic: "The impact of open-source AI on enterprise software development" + +# Install: pip install "praisonai[ag2]" +# Run: praisonai --framework ag2 examples/ag2/ag2_multi_agent.yaml +# or praisonai run examples/ag2/ag2_multi_agent.yaml --framework ag2 +# +# This example demonstrates AG2's GroupChat multi-agent coordination. +# Both agents participate in a collaborative conversation managed by +# a GroupChatManager until the task is complete. + +roles: + researcher: + role: "Research Specialist" + goal: "Gather and analyse information on the given topic" + backstory: | + You are a meticulous researcher who excels at finding relevant + information, analysing trends, and presenting data-backed insights. + You always cite your reasoning and structure your findings clearly. + tasks: + research_task: + description: | + Research the topic: {topic} + + Investigate: + 1. Current state and adoption rates + 2. Key players and projects driving the trend + 3. Technical advantages and challenges + 4. Business impact and cost implications + expected_output: | + A structured research briefing with findings on the topic, + including key data points, trends, and technical observations. + + writer: + role: "Technical Content Writer" + goal: "Transform research findings into clear, engaging written content" + backstory: | + You are a skilled technical writer who turns complex research into + accessible, well-structured articles. You focus on clarity, logical + flow, and actionable takeaways for a professional audience. 
+ tasks: + writing_task: + description: | + Using the research findings provided by the Research Specialist, + write a concise article on: {topic} + + The article should: + 1. Open with a compelling hook + 2. Present key findings logically + 3. Include practical implications for developers + 4. Close with a forward-looking conclusion + expected_output: | + A 400-500 word article suitable for a technical blog, + with clear sections, professional tone, and concrete takeaways. diff --git a/src/praisonai/.env.example b/src/praisonai/.env.example index 645eca6de..0094ca9cf 100644 --- a/src/praisonai/.env.example +++ b/src/praisonai/.env.example @@ -1,6 +1,16 @@ -OPENAI_MODEL_NAME="gpt-4o" +# OpenAI / compatible API OPENAI_API_KEY="Enter your API key" +OPENAI_MODEL_NAME="gpt-4o" OPENAI_API_BASE="https://api.openai.com/v1" + +# AG2 framework (uses same OPENAI_* vars above, or override below) +# MODEL_NAME=gpt-4o-mini + +# AWS Bedrock (for ag2_bedrock.yaml example) +# AWS_DEFAULT_REGION=us-east-1 +# AWS_ACCESS_KEY_ID=your-access-key +# AWS_SECRET_ACCESS_KEY=your-secret-key + +# Chainlit (optional) CHAINLIT_USERNAME=admin -CHAINLIT_USERNAME=admin -CHAINLIT_AUTH_SECRET="chainlit create-secret to create" \ No newline at end of file +CHAINLIT_AUTH_SECRET="chainlit create-secret to create" diff --git a/src/praisonai/praisonai/agents_generator.py b/src/praisonai/praisonai/agents_generator.py index a73984edf..8bafc7b50 100644 --- a/src/praisonai/praisonai/agents_generator.py +++ b/src/praisonai/praisonai/agents_generator.py @@ -41,6 +41,16 @@ except ImportError: pass +AG2_AVAILABLE = False +try: + import importlib.metadata as _importlib_metadata + _importlib_metadata.distribution('ag2') + from autogen import LLMConfig as _AG2LLMConfig # noqa: F401 — AG2-exclusive class + AG2_AVAILABLE = True + del _AG2LLMConfig, _importlib_metadata +except Exception: + pass + try: import agentops AGENTOPS_AVAILABLE = True @@ -51,7 +61,7 @@ pass # Only try to import praisonai_tools if either 
CrewAI or AutoGen is available -if CREWAI_AVAILABLE or AUTOGEN_AVAILABLE or PRAISONAI_AVAILABLE: +if CREWAI_AVAILABLE or AUTOGEN_AVAILABLE or PRAISONAI_AVAILABLE or AG2_AVAILABLE: try: from praisonai_tools import ( CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, DirectoryReadTool, @@ -127,6 +137,8 @@ def __init__(self, agent_file, framework, config_list, log_level=None, agent_cal raise ImportError("AutoGen is not installed. Please install it with 'pip install praisonai[autogen]'") elif framework == "praisonai" and not PRAISONAI_AVAILABLE: raise ImportError("PraisonAI is not installed. Please install it with 'pip install praisonaiagents'") + elif framework == "ag2" and not AG2_AVAILABLE: + raise ImportError("AG2 is not installed. Please install it with 'pip install praisonai[ag2]'") def is_function_or_decorated(self, obj): """ @@ -274,7 +286,7 @@ def generate_crew_and_kickoff(self): tools_dict = {} # Only try to use praisonai_tools if it's available and needed - if PRAISONAI_TOOLS_AVAILABLE and (CREWAI_AVAILABLE or AUTOGEN_AVAILABLE or PRAISONAI_AVAILABLE): + if PRAISONAI_TOOLS_AVAILABLE and (CREWAI_AVAILABLE or AUTOGEN_AVAILABLE or PRAISONAI_AVAILABLE or AG2_AVAILABLE): tools_dict = { 'CodeDocsSearchTool': CodeDocsSearchTool(), 'CSVSearchTool': CSVSearchTool(), @@ -327,6 +339,12 @@ def generate_crew_and_kickoff(self): if AGENTOPS_AVAILABLE: agentops.init(os.environ.get("AGENTOPS_API_KEY"), default_tags=["praisonai"]) return self._run_praisonai(config, topic, tools_dict) + elif framework == "ag2": + if not AG2_AVAILABLE: + raise ImportError("AG2 is not installed. Please install it with 'pip install praisonai[ag2]'") + if AGENTOPS_AVAILABLE: + agentops.init(os.environ.get("AGENTOPS_API_KEY"), default_tags=["ag2"]) + return self._run_ag2(config, topic, tools_dict) else: # framework=crewai if not CREWAI_AVAILABLE: raise ImportError("CrewAI is not installed. 
Please install it with 'pip install praisonai[crewai]'") @@ -407,6 +425,130 @@ def _run_autogen(self, config, topic, tools_dict): return result + def _run_ag2(self, config, topic, tools_dict): + """ + Run agents using the AG2 framework (community fork of AutoGen, PyPI: ag2). + + AG2 installs under the 'autogen' namespace — there is no 'import ag2'. + Uses LLMConfig context manager + AssistantAgent + GroupChat pattern. + + Args: + config (dict): Configuration dictionary parsed from YAML + topic (str): The topic/task to process + tools_dict (dict): Dictionary of available tools + + Returns: + str: Result prefixed with '### AG2 Output ###' + """ + import re + from autogen import ( + AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager, LLMConfig + ) + + model_config = self.config_list[0] if self.config_list else {} + api_type = model_config.get("api_type", "openai").lower() + model_name = model_config.get("model", "gpt-4o-mini") + api_key = model_config.get("api_key") or os.environ.get("OPENAI_API_KEY") + base_url = model_config.get("base_url") or os.environ.get("OPENAI_BASE_URL") + + # Build LLMConfig — pass a config dict; Bedrock needs no api_key + if api_type == "bedrock": + llm_config_entry = {"api_type": "bedrock", "model": model_name} + else: + llm_config_entry = {"model": model_name} + if api_key: + llm_config_entry["api_key"] = api_key + if base_url and base_url != "https://api.openai.com/v1": + llm_config_entry["base_url"] = base_url + llm_config = LLMConfig(llm_config_entry) + + user_proxy = UserProxyAgent( + name="User", + human_input_mode="NEVER", + is_termination_msg=lambda x: "TERMINATE" in (x.get("content") or ""), + code_execution_config=False, + ) + + # Create one AssistantAgent per role, passing llm_config directly + ag2_agent_entries = [] + for role, details in config["roles"].items(): + agent_name = details.get("role", role).replace("{topic}", topic) + backstory = details.get("backstory", "").replace("{topic}", topic) + agent_name_safe = 
re.sub(r"[^a-zA-Z0-9_\-]", "_", agent_name) + assistant = AssistantAgent( + name=agent_name_safe, + system_message=backstory + "\nWhen the task is done, reply 'TERMINATE'.", + llm_config=llm_config, + ) + ag2_agent_entries.append((role, details, assistant)) + + # Register tools via AG2 decorator pattern + for role, details, assistant in ag2_agent_entries: + for tool_name in details.get("tools", []): + tool = tools_dict.get(tool_name) + if tool is None: + continue + func = tool if callable(tool) else getattr(tool, "run", None) + if func is None: + continue + + def make_tool_fn(f): + def tool_fn(**kwargs): + return f(**kwargs) if callable(f) else str(f) + tool_fn.__name__ = tool_name + return tool_fn + + wrapped = make_tool_fn(func) + assistant.register_for_llm(description=f"Tool: {tool_name}")(wrapped) + user_proxy.register_for_execution()(wrapped) + + all_assistants = [a for _, _, a in ag2_agent_entries] + if not all_assistants: + return "### AG2 Output ###\nNo agents created from configuration." 
+ + # Build initial message from all task descriptions + task_lines = [] + for role, details, _ in ag2_agent_entries: + for task_name, task_details in details.get("tasks", {}).items(): + desc = task_details.get("description", "").replace("{topic}", topic) + if desc: + task_lines.append(desc) + initial_message = "\n".join(task_lines) if task_lines else topic + + groupchat = GroupChat( + agents=[user_proxy] + all_assistants, + messages=[], + max_round=12, + ) + manager = GroupChatManager(groupchat=groupchat, llm_config=llm_config) + + try: + chat_result = user_proxy.initiate_chat(manager, message=initial_message) + except Exception as e: + return f"### AG2 Error ###\n{str(e)}" + + # Prefer ChatResult.summary if available, otherwise scan messages + result_content = "" + summary = getattr(chat_result, "summary", None) + if summary and isinstance(summary, str) and summary.strip(): + result_content = re.sub(r'[\s\.\,]*TERMINATE[\s\.\,]*$', '', summary, flags=re.IGNORECASE).strip().rstrip('.') + + if not result_content: + for msg in reversed(groupchat.messages): + # Skip the initial user proxy message + if msg.get("name") == "User": + continue + content = (msg.get("content") or "").strip() + if content: + result_content = re.sub(r'[\s\.\,]*TERMINATE[\s\.\,]*$', '', content, flags=re.IGNORECASE).strip().rstrip('.') + if result_content: + break + + if not result_content: + result_content = "Task completed." + + return f"### AG2 Output ###\n{result_content}" + def _run_crewai(self, config, topic, tools_dict): """ Run agents using the CrewAI framework. 
diff --git a/src/praisonai/praisonai/auto.py b/src/praisonai/praisonai/auto.py index 1f6342224..67bd4637d 100644 --- a/src/praisonai/praisonai/auto.py +++ b/src/praisonai/praisonai/auto.py @@ -32,6 +32,16 @@ except ImportError: pass +AG2_AVAILABLE = False +try: + import importlib.metadata as _importlib_metadata + _importlib_metadata.distribution('ag2') + from autogen import LLMConfig as _AG2LLMConfig # noqa: F401 — AG2-exclusive class + AG2_AVAILABLE = True + del _AG2LLMConfig, _importlib_metadata +except Exception: + pass + try: from praisonai_tools import ( CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, @@ -83,6 +93,11 @@ def __init__(self, topic="Movie Story writing about AI", agent_file="test.yaml", Praisonai is not installed. Please install with: pip install praisonaiagents """) + elif framework == "ag2" and not AG2_AVAILABLE: + raise ImportError(""" +AG2 is not installed. Please install with: + pip install "praisonai[ag2]" +""") # Only show tools message if using a framework and tools are needed if (framework in ["crewai", "autogen"]) and not PRAISONAI_TOOLS_AVAILABLE: diff --git a/src/praisonai/pyproject.toml b/src/praisonai/pyproject.toml index c563733bf..fd39f72da 100644 --- a/src/praisonai/pyproject.toml +++ b/src/praisonai/pyproject.toml @@ -92,6 +92,7 @@ call = [ train = [] crewai = ["crewai>=0.32.0", "praisonai-tools>=0.0.15"] autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.15", "crewai"] +ag2 = ["ag2>=0.11.0", "praisonai-tools>=0.0.15"] [tool.poetry] name = "PraisonAI" @@ -119,6 +120,7 @@ instructor = ">=1.3.3" PyYAML = ">=6.0" mcp = ">=1.6.0" pyautogen = {version = ">=0.2.19", optional = true} +ag2 = {version = ">=0.11.0", optional = true} crewai = {version = ">=0.32.0", optional = true} praisonai-tools = {version = ">=0.0.15", optional = true} chainlit = {version = "==2.5.5", optional = true} @@ -277,6 +279,7 @@ call = [ ] crewai = ["crewai", "praisonai-tools"] autogen = ["pyautogen", "praisonai-tools", "crewai"] +ag2 
= ["ag2", "praisonai-tools"] [tool.poetry-dynamic-versioning] enable = true diff --git a/src/praisonai/tests/integration/ag2/__init__.py b/src/praisonai/tests/integration/ag2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/praisonai/tests/integration/ag2/test_ag2_integration.py b/src/praisonai/tests/integration/ag2/test_ag2_integration.py new file mode 100644 index 000000000..1e63cafaa --- /dev/null +++ b/src/praisonai/tests/integration/ag2/test_ag2_integration.py @@ -0,0 +1,477 @@ +""" +AG2 Mock Integration Tests — verifies PraisonAI correctly orchestrates AG2 agents. + +Uses mocked AG2 responses — no real LLM API calls are made. +Fast: all tests should complete in < 1s each. + +Run: + pytest tests/integration/ag2/test_ag2_integration.py -v +""" + +import pytest +import os +import sys +from unittest.mock import patch, MagicMock + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../")) + +# Stub heavy dependencies that auto.py (develop branch) imports at module level +# so that tests can import praisonai without a full installation. +for _stub in ("instructor",): + if _stub not in sys.modules: + sys.modules[_stub] = MagicMock() + +import importlib as _importlib +if "openai" not in sys.modules: + try: + _importlib.import_module("openai") + except ImportError: + _mock_openai = MagicMock() + _mock_openai.__version__ = "1.0.0" + sys.modules["openai"] = _mock_openai + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def single_agent_yaml(): + return """ +framework: ag2 +topic: Write a short poem about Python +roles: + poet: + role: "Creative Poet" + goal: "Write elegant and concise poems" + backstory: "You are a creative poet who loves Python and technology." 
+ tasks: + write_poem: + description: "Write a short 4-line poem about Python programming" + expected_output: "A 4-line poem about Python" +""" + + +@pytest.fixture +def multi_agent_yaml(): + return """ +framework: ag2 +topic: Explain why open-source AI matters +roles: + researcher: + role: "Research Analyst" + goal: "Research and gather key facts" + backstory: "Expert in technology research and trend analysis." + tasks: + research: + description: "Research why open-source AI matters" + expected_output: "Key facts and data points" + writer: + role: "Technical Writer" + goal: "Write clear technical content" + backstory: "Professional technical writer with 10 years experience." + tasks: + write: + description: "Write a clear explanation of why open-source AI matters" + expected_output: "A 200-word explanation" +""" + + +@pytest.fixture +def mock_ag2_classes(): + """Mock all AG2 classes used in _run_ag2.""" + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + + mock_assistant = MagicMock() + mock_assistant.name = "test_agent" + + mock_user_proxy = MagicMock() + mock_user_proxy.name = "User" + + mock_groupchat = MagicMock() + mock_groupchat.messages = [ + { + "name": "test_agent", + "role": "assistant", + "content": "Task completed successfully. TERMINATE", + } + ] + + mock_manager = MagicMock() + + return { + "llm_config": mock_llm_config, + "assistant": mock_assistant, + "user_proxy": mock_user_proxy, + "groupchat": mock_groupchat, + "manager": mock_manager, + } + + +# --------------------------------------------------------------------------- +# AG2 import detection +# --------------------------------------------------------------------------- + +class TestAG2Import: + + @pytest.mark.integration + def test_ag2_distribution_available(self): + """ag2 PyPI distribution should be findable when installed. 
+ + Note: ag2 installs under the 'autogen' namespace — 'import ag2' does NOT work. + Detection uses importlib.metadata to check the 'ag2' distribution name. + """ + import importlib.metadata + try: + dist = importlib.metadata.distribution('ag2') + assert dist.metadata['Name'] == 'ag2' + except importlib.metadata.PackageNotFoundError: + pytest.skip("ag2 not installed — skipping AG2 integration tests") + + @pytest.mark.integration + def test_autogen_namespace_importable_with_ag2(self): + """When ag2 is installed, autogen namespace classes are importable.""" + try: + from autogen import AssistantAgent, UserProxyAgent, GroupChat, LLMConfig + assert AssistantAgent is not None + assert UserProxyAgent is not None + assert GroupChat is not None + assert LLMConfig is not None + except ImportError: + pytest.skip("ag2 not installed — autogen namespace not available") + + +# --------------------------------------------------------------------------- +# Single-agent flow (mocked) +# --------------------------------------------------------------------------- + +class TestAG2SingleAgentFlow: + + @pytest.mark.integration + def test_single_agent_yaml_initialises_praisonai(self, single_agent_yaml, mock_ag2_classes): + """PraisonAI initialises correctly with framework='ag2' from YAML.""" + import tempfile + + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + f.write(single_agent_yaml) + yaml_path = f.name + + try: + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + try: + from praisonai import PraisonAI + ai = PraisonAI(agent_file=yaml_path, framework="ag2") + assert ai.framework == "ag2" + except ImportError as e: + pytest.skip(f"PraisonAI not available: {e}") + finally: + os.unlink(yaml_path) + + @pytest.mark.integration + def test_single_agent_run_returns_ag2_output(self, mock_ag2_classes): + """_run_ag2 executes single-agent flow and returns '### AG2 Output ###'.""" + 
config = { + "framework": "ag2", + "topic": "Write a poem", + "roles": { + "poet": { + "role": "Creative Poet", + "goal": "Write poems", + "backstory": "A creative poet.", + "tasks": { + "task1": { + "description": "Write a poem about Python", + "expected_output": "A short poem", + } + }, + "tools": [], + } + }, + } + + m = mock_ag2_classes + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + try: + from praisonai.agents_generator import AgentsGenerator + except ImportError as e: + pytest.skip(f"AgentsGenerator not available: {e}") + + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", return_value=m["manager"]): + + result = gen._run_ag2(config, "Write a poem", {}) + + assert "### AG2 Output ###" in result + assert "Task completed successfully" in result + + @pytest.mark.integration + def test_single_agent_calls_initiate_chat(self, mock_ag2_classes): + """user_proxy.initiate_chat is called with the manager and initial message.""" + config = { + "framework": "ag2", + "topic": "Test topic", + "roles": { + "agent": { + "role": "Agent", + "goal": "Help", + "backstory": "Helpful agent.", + "tasks": { + "t": {"description": "Do the task", "expected_output": "Done"} + }, + "tools": [], + } + }, + } + + m = mock_ag2_classes + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + 
patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + try: + from praisonai.agents_generator import AgentsGenerator + except ImportError as e: + pytest.skip(f"AgentsGenerator not available: {e}") + + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", return_value=m["manager"]): + + gen._run_ag2(config, "Test topic", {}) + + m["user_proxy"].initiate_chat.assert_called_once() + call_args = m["user_proxy"].initiate_chat.call_args + # First positional arg should be the manager + assert call_args[0][0] is m["manager"] + # message kwarg should be non-empty (task description or topic) + message = call_args[1].get("message", "") + assert message != "" + + +# --------------------------------------------------------------------------- +# Multi-agent GroupChat flow (mocked) +# --------------------------------------------------------------------------- + +class TestAG2MultiAgentGroupChatFlow: + + @pytest.mark.integration + def test_multi_agent_creates_correct_number_of_assistants(self, multi_agent_yaml, mock_ag2_classes): + """Two roles in YAML → two AssistantAgents created.""" + config = { + "framework": "ag2", + "topic": "Explain open-source AI", + "roles": { + "researcher": { + "role": "Research Analyst", "goal": "Research", "backstory": "Analyst.", + "tasks": {"r": {"description": "Research it", "expected_output": "Facts"}}, + "tools": [], + }, + "writer": { + "role": "Technical Writer", "goal": "Write", "backstory": "Writer.", + "tasks": {"w": {"description": "Write it", "expected_output": "Article"}}, + "tools": [], 
+ }, + }, + } + + m = mock_ag2_classes + assistant_call_count = [0] + + def count_assistant(**kwargs): + assistant_call_count[0] += 1 + a = MagicMock() + a.name = kwargs.get("name", f"agent_{assistant_call_count[0]}") + return a + + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + try: + from praisonai.agents_generator import AgentsGenerator + except ImportError as e: + pytest.skip(f"AgentsGenerator not available: {e}") + + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", side_effect=count_assistant), \ + patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", return_value=m["manager"]): + + gen._run_ag2(config, "Explain open-source AI", {}) + + assert assistant_call_count[0] == 2 + + @pytest.mark.integration + def test_multi_agent_groupchat_max_round_set(self, mock_ag2_classes): + """GroupChat is created with max_round parameter.""" + config = { + "framework": "ag2", + "topic": "Test", + "roles": { + "a": { + "role": "Agent", "goal": "Help", "backstory": "Helper.", + "tasks": {"t": {"description": "Do", "expected_output": "Done"}}, + "tools": [], + } + }, + } + + m = mock_ag2_classes + groupchat_kwargs = {} + + def capture_groupchat(**kwargs): + groupchat_kwargs.update(kwargs) + return m["groupchat"] + + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + 
try: + from praisonai.agents_generator import AgentsGenerator + except ImportError as e: + pytest.skip(f"AgentsGenerator not available: {e}") + + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", side_effect=capture_groupchat), \ + patch("autogen.GroupChatManager", return_value=m["manager"]): + + gen._run_ag2(config, "Test", {}) + + assert "max_round" in groupchat_kwargs + assert groupchat_kwargs["max_round"] > 0 + + +# --------------------------------------------------------------------------- +# Backward-compatibility: existing autogen/crewai paths unaffected +# --------------------------------------------------------------------------- + +class TestAG2BackwardCompatibility: + + @pytest.mark.integration + def test_autogen_framework_still_works(self): + """framework='autogen' dispatches to _run_autogen, not _run_ag2.""" + import yaml + + with patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", True), \ + patch("praisonai.agents_generator.AG2_AVAILABLE", False), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + try: + from praisonai.agents_generator import AgentsGenerator + except ImportError as e: + pytest.skip(f"AgentsGenerator not available: {e}") + + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="autogen", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + assert gen.framework == "autogen" + + gen.agent_yaml = yaml.dump({ + "framework": "autogen", + "topic": "Test", + "roles": { + "agent": { + "role": "Agent", "goal": "Help", "backstory": "Helper.", + "tasks": {"t": {"description": "Do", "expected_output": "Done"}}, + "tools": 
[], + } + }, + }) + + # Verify _run_ag2 is NOT called when framework='autogen' + with patch.object(gen, "_run_ag2") as mock_run_ag2, \ + patch.object(gen, "_run_autogen", return_value="autogen result") as mock_run_autogen: + gen.generate_crew_and_kickoff() + + mock_run_ag2.assert_not_called() + mock_run_autogen.assert_called_once() + + @pytest.mark.integration + def test_ag2_framework_dispatches_to_run_ag2(self, mock_ag2_classes): + """framework='ag2' dispatches to _run_ag2 and NOT to _run_autogen.""" + import yaml + + m = mock_ag2_classes + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + try: + from praisonai.agents_generator import AgentsGenerator + except ImportError as e: + pytest.skip(f"AgentsGenerator not available: {e}") + + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + gen.agent_yaml = yaml.dump({ + "framework": "ag2", + "topic": "Test", + "roles": { + "agent": { + "role": "Agent", "goal": "Help", "backstory": "Helper.", + "tasks": {"t": {"description": "Do", "expected_output": "Done"}}, + "tools": [], + } + }, + }) + + with patch.object(gen, "_run_autogen") as mock_autogen, \ + patch("autogen.LLMConfig", return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", return_value=m["manager"]): + + result = gen.generate_crew_and_kickoff() + + mock_autogen.assert_not_called() + assert "### AG2 Output ###" in result + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/src/praisonai/tests/source/ag2_function_tools.py 
b/src/praisonai/tests/source/ag2_function_tools.py new file mode 100644 index 000000000..258f32470 --- /dev/null +++ b/src/praisonai/tests/source/ag2_function_tools.py @@ -0,0 +1,98 @@ +""" +Example of AG2 tool registration for use with PraisonAI's --framework ag2. + +AG2 (community fork of AutoGen, PyPI package: ag2) installs under the +'autogen' namespace but is a distinct package from pyautogen/pyautogen2. + +Usage with PraisonAI: + pip install "praisonai[ag2]" + praisonai --framework ag2 agents.yaml + +Standalone run: + python tests/source/ag2_function_tools.py +""" +from typing import Annotated, Literal +import os + +# ag2 installs under the 'autogen' namespace +from autogen import ( + AssistantAgent, + UserProxyAgent, + GroupChat, + GroupChatManager, + LLMConfig, +) + +Operator = Literal["+", "-", "*", "/"] + + +def calculator( + a: Annotated[int, "First operand"], + b: Annotated[int, "Second operand"], + operator: Annotated[Operator, "Arithmetic operator: +, -, *, /"], +) -> int: + """Perform basic arithmetic operations.""" + if operator == "+": + return a + b + elif operator == "-": + return a - b + elif operator == "*": + return a * b + elif operator == "/": + if b == 0: + raise ValueError("Division by zero") + return int(a / b) + else: + raise ValueError(f"Invalid operator: {operator}") + + +# Build LLMConfig — AG2 uses a context manager pattern so agents +# created inside the 'with' block automatically inherit the config. +llm_config = LLMConfig( + api_type="openai", + model=os.environ.get("MODEL_NAME", "gpt-4o-mini"), + api_key=os.environ.get("OPENAI_API_KEY"), +) + +# Create AssistantAgent inside llm_config context +with llm_config: + assistant = AssistantAgent( + name="Calculator_Assistant", + system_message=( + "You are a helpful AI assistant that can perform arithmetic calculations. " + "Use the calculator tool when math operations are needed. " + "Return 'TERMINATE' when the task is done." 
+ ), + ) + +# UserProxyAgent does not use LLM — created outside context +user_proxy = UserProxyAgent( + name="User", + human_input_mode="NEVER", + is_termination_msg=lambda msg: ( + msg.get("content") is not None and "TERMINATE" in msg["content"] + ), + code_execution_config=False, +) + +# AG2 tool registration pattern: +# @agent.register_for_llm(description="...") — exposes tool schema to the LLM +# @user_proxy.register_for_execution() — tells user_proxy to execute it + + +@assistant.register_for_llm(description="A simple calculator for arithmetic operations.") +@user_proxy.register_for_execution() +def calculator_tool( + a: Annotated[int, "First operand"], + b: Annotated[int, "Second operand"], + operator: Annotated[Operator, "Arithmetic operator"], +) -> int: + return calculator(a, b, operator) + + +if __name__ == "__main__": + chat_result = user_proxy.initiate_chat( + assistant, + message="What is (44232 + 13312 / (232 - 32)) * 5?", + ) + print(f"\nResult: {chat_result.summary}") diff --git a/src/praisonai/tests/unit/test_ag2_adapter.py b/src/praisonai/tests/unit/test_ag2_adapter.py new file mode 100644 index 000000000..7e04a89a2 --- /dev/null +++ b/src/praisonai/tests/unit/test_ag2_adapter.py @@ -0,0 +1,542 @@ +""" +Unit tests for the AG2 framework integration in PraisonAI. + +Tests cover: +- AG2 availability detection (AG2_AVAILABLE flag) +- Framework validation in AgentsGenerator.__init__ +- _run_ag2 method: config parsing, agent creation, tool handling +- LLMConfig construction (OpenAI and Bedrock paths) + +All external calls are mocked — no real LLM API calls made. +""" + +import pytest +import os +import sys +from unittest.mock import patch, MagicMock, call +import importlib + +# Ensure src is on path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) + +# Stub heavy dependencies that auto.py (develop branch) imports at module level +# so that tests can import praisonai without a full installation. 
+for _stub in ("instructor",): + if _stub not in sys.modules: + sys.modules[_stub] = MagicMock() + +# openai is installed (required by ag2/autogen internals), but auto.py also +# imports it at module level. Ensure it's really loaded, not a mock. +import importlib as _importlib +if "openai" not in sys.modules: + try: + _importlib.import_module("openai") + except ImportError: + _mock_openai = MagicMock() + _mock_openai.__version__ = "1.0.0" + sys.modules["openai"] = _mock_openai + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _make_config(roles=None, framework="ag2", topic="Test topic"): + """Build a minimal agents config dict.""" + if roles is None: + roles = { + "researcher": { + "role": "Researcher", + "goal": "Research things", + "backstory": "You are a researcher.", + "tasks": { + "task1": { + "description": "Research {topic}", + "expected_output": "A research report", + } + }, + "tools": [], + } + } + return {"framework": framework, "topic": topic, "roles": roles} + + +# --------------------------------------------------------------------------- +# AG2_AVAILABLE flag detection +# --------------------------------------------------------------------------- + +class TestAG2AvailabilityFlag: + + def test_ag2_available_false_when_not_installed(self): + """AG2_AVAILABLE is False when 'ag2' distribution is not found.""" + import importlib.metadata as meta + original_fn = meta.distribution + + def raise_not_found(name): + if name == 'ag2': + raise meta.PackageNotFoundError('ag2') + return original_fn(name) + + # Verify detection logic directly without relying on cached module state + ag2_detected = True + try: + with patch('importlib.metadata.distribution', side_effect=raise_not_found): + meta.distribution('ag2') + except meta.PackageNotFoundError: + ag2_detected = False + assert ag2_detected is False + + def 
test_ag2_available_true_when_installed(self): + """AG2_AVAILABLE is True when 'ag2' distribution and LLMConfig are present.""" + import importlib.metadata as meta + try: + dist = meta.distribution('ag2') + assert dist is not None + from autogen import LLMConfig # noqa: F401 + except (meta.PackageNotFoundError, ImportError): + pytest.skip("ag2 not installed — skipping") + + +# --------------------------------------------------------------------------- +# AgentsGenerator.__init__ framework validation +# --------------------------------------------------------------------------- + +class TestAgentsGeneratorAG2Validation: + + def _make_generator(self, framework, ag2_available=True): + """Create AgentsGenerator with mocked availability flags.""" + with patch("praisonai.agents_generator.AG2_AVAILABLE", ag2_available), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + from praisonai.agents_generator import AgentsGenerator + return AgentsGenerator( + agent_file="agents.yaml", + framework=framework, + config_list=[{"model": "gpt-4o-mini", "api_key": "test-key"}], + ) + + def test_ag2_framework_accepted_when_available(self): + """No ImportError when framework='ag2' and AG2 is installed.""" + gen = self._make_generator("ag2", ag2_available=True) + assert gen.framework == "ag2" + + def test_ag2_framework_raises_when_not_available(self): + """ImportError raised with helpful message when ag2 not installed.""" + with pytest.raises(ImportError, match="AG2 is not installed"): + self._make_generator("ag2", ag2_available=False) + + def test_autogen_framework_unaffected(self): + """Existing autogen framework path still works independently.""" + with patch("praisonai.agents_generator.AG2_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + 
patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + from praisonai.agents_generator import AgentsGenerator + gen = AgentsGenerator( + agent_file="agents.yaml", + framework="autogen", + config_list=[{"model": "gpt-4o-mini", "api_key": "test-key"}], + ) + assert gen.framework == "autogen" + + +# --------------------------------------------------------------------------- +# _run_ag2: LLMConfig construction +# --------------------------------------------------------------------------- + +class TestRunAG2LLMConfig: + + def _make_gen_with_config(self, config_list): + """Build an AgentsGenerator instance with given config_list.""" + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + from praisonai.agents_generator import AgentsGenerator + return AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=config_list, + ) + + @patch("praisonai.agents_generator.AG2_AVAILABLE", True) + def test_openai_llm_config_constructed(self): + """LLMConfig is built with api_type='openai' for standard config.""" + gen = self._make_gen_with_config([ + {"model": "gpt-4o-mini", "api_key": "sk-test", "api_type": "openai"} + ]) + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + + mock_assistant = MagicMock() + mock_user_proxy = MagicMock() + mock_groupchat = MagicMock() + mock_groupchat.messages = [{"name": "Researcher", "content": "Done. 
TERMINATE", "role": "assistant"}] + mock_manager = MagicMock() + + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("autogen.LLMConfig", return_value=mock_llm_config) as mock_llmcfg, \ + patch("autogen.AssistantAgent", return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + config = _make_config() + gen._run_ag2(config, "Test topic", {}) + + mock_llmcfg.assert_called_once() + # LLMConfig is called with a positional dict arg + call_args = mock_llmcfg.call_args[0][0] + assert call_args.get("model") == "gpt-4o-mini" + assert call_args.get("api_key") == "sk-test" + + @patch("praisonai.agents_generator.AG2_AVAILABLE", True) + def test_bedrock_llm_config_constructed(self): + """LLMConfig uses api_type='bedrock' when config specifies bedrock.""" + gen = self._make_gen_with_config([ + { + "model": "anthropic.claude-3-5-sonnet-20241022-v2:0", + "api_type": "bedrock", + } + ]) + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + + mock_assistant = MagicMock() + mock_user_proxy = MagicMock() + mock_groupchat = MagicMock() + mock_groupchat.messages = [{"name": "Agent", "content": "Report ready. 
TERMINATE", "role": "assistant"}] + mock_manager = MagicMock() + + with patch("autogen.LLMConfig", return_value=mock_llm_config) as mock_llmcfg, \ + patch("autogen.AssistantAgent", return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + config = _make_config() + gen._run_ag2(config, "AWS deployment", {}) + + mock_llmcfg.assert_called_once() + # LLMConfig is called with a positional dict arg + call_args = mock_llmcfg.call_args[0][0] + assert call_args.get("api_type") == "bedrock" + assert "api_key" not in call_args # no api_key for bedrock + + +# --------------------------------------------------------------------------- +# _run_ag2: agent and GroupChat creation +# --------------------------------------------------------------------------- + +class TestRunAG2AgentCreation: + + def _make_gen(self): + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + from praisonai.agents_generator import AgentsGenerator + return AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + def test_assistant_created_per_role(self): + """One AssistantAgent is created for each role in the config.""" + gen = self._make_gen() + config = _make_config(roles={ + "role_a": { + "role": "Agent A", "goal": "Goal A", "backstory": "Backstory A", + "tasks": {"t1": {"description": "Do A", "expected_output": "A done"}}, + "tools": [], + }, + "role_b": { + "role": "Agent B", "goal": "Goal B", "backstory": "Backstory B", + "tasks": {"t2": {"description": "Do B", "expected_output": "B done"}}, + "tools": [], + }, + }) + + mock_llm_config = MagicMock() + 
mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + + created_agents = [] + + def fake_assistant(**kwargs): + m = MagicMock() + m.name = kwargs.get("name", "agent") + created_agents.append(m) + return m + + mock_user_proxy = MagicMock() + mock_groupchat = MagicMock() + mock_groupchat.messages = [{"name": "Agent A", "content": "Done. TERMINATE", "role": "assistant"}] + mock_manager = MagicMock() + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", side_effect=fake_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + gen._run_ag2(config, "Test", {}) + + assert len(created_agents) == 2 + + def test_groupchat_includes_user_proxy_and_assistants(self): + """GroupChat receives user_proxy + all assistants.""" + gen = self._make_gen() + config = _make_config() + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + + mock_assistant = MagicMock() + mock_assistant.name = "Researcher" + mock_user_proxy = MagicMock() + mock_user_proxy.name = "User" + mock_groupchat = MagicMock() + mock_groupchat.messages = [{"name": "Researcher", "content": "Done. 
TERMINATE", "role": "assistant"}] + mock_manager = MagicMock() + + groupchat_call_args = {} + + def capture_groupchat(**kwargs): + groupchat_call_args.update(kwargs) + return mock_groupchat + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", side_effect=capture_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + gen._run_ag2(config, "Test", {}) + + agents_in_groupchat = groupchat_call_args.get("agents", []) + assert mock_user_proxy in agents_in_groupchat + assert mock_assistant in agents_in_groupchat + + def test_empty_roles_returns_no_agents_message(self): + """Returns a clear message when no roles are defined in config.""" + gen = self._make_gen() + config = _make_config(roles={}) + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + mock_user_proxy = MagicMock() + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent"), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy): + + result = gen._run_ag2(config, "Test", {}) + + assert "No agents" in result + + +# --------------------------------------------------------------------------- +# _run_ag2: system message composition +# --------------------------------------------------------------------------- + +class TestRunAG2SystemMessage: + + def _make_gen(self): + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + from praisonai.agents_generator import AgentsGenerator + return AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", 
+ config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + def test_system_message_contains_backstory(self): + """AssistantAgent system_message includes the backstory from YAML.""" + gen = self._make_gen() + config = _make_config(roles={ + "agent1": { + "role": "Expert", + "goal": "Help users", + "backstory": "A unique backstory for testing purposes.", + "tasks": {"t": {"description": "Do it", "expected_output": "Done"}}, + "tools": [], + } + }) + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + mock_user_proxy = MagicMock() + mock_groupchat = MagicMock() + mock_groupchat.messages = [{"name": "Expert", "content": "Done. TERMINATE", "role": "assistant"}] + mock_manager = MagicMock() + + assistant_kwargs = {} + + def capture_assistant(**kwargs): + assistant_kwargs.update(kwargs) + m = MagicMock() + m.name = kwargs.get("name", "agent") + return m + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", side_effect=capture_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + gen._run_ag2(config, "Test", {}) + + assert "A unique backstory for testing purposes." 
in assistant_kwargs.get("system_message", "") + assert "TERMINATE" in assistant_kwargs.get("system_message", "") + + def test_agent_name_sanitised(self): + """Agent names with special characters are sanitised for AG2 compatibility.""" + gen = self._make_gen() + config = _make_config(roles={ + "agent1": { + "role": "AI & ML Expert (2024)", # contains special chars + "goal": "Help", + "backstory": "Expert.", + "tasks": {"t": {"description": "Do it", "expected_output": "Done"}}, + "tools": [], + } + }) + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + mock_user_proxy = MagicMock() + mock_groupchat = MagicMock() + mock_groupchat.messages = [{"name": "AI___ML_Expert__2024_", "content": "Done. TERMINATE", "role": "assistant"}] + mock_manager = MagicMock() + created_name = {} + + def capture_assistant(**kwargs): + created_name["name"] = kwargs.get("name", "") + m = MagicMock() + m.name = created_name["name"] + return m + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", side_effect=capture_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + gen._run_ag2(config, "Test", {}) + + name = created_name.get("name", "") + # Should not contain special chars that would break AG2 + import re + assert re.match(r"^[a-zA-Z0-9_\-]+$", name), f"Name '{name}' contains invalid characters" + + +# --------------------------------------------------------------------------- +# _run_ag2: output extraction +# --------------------------------------------------------------------------- + +class TestRunAG2OutputExtraction: + + def _make_gen(self): + with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ + patch("praisonai.agents_generator.CREWAI_AVAILABLE", False), \ + 
patch("praisonai.agents_generator.AUTOGEN_AVAILABLE", False), \ + patch("praisonai.agents_generator.PRAISONAI_AVAILABLE", True): + from praisonai.agents_generator import AgentsGenerator + return AgentsGenerator( + agent_file="agents.yaml", + framework="ag2", + config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], + ) + + def _run_with_messages(self, messages): + gen = self._make_gen() + config = _make_config() + + mock_llm_config = MagicMock() + mock_assistant = MagicMock() + mock_assistant.name = "Researcher" + mock_user_proxy = MagicMock() + # chat_result with no summary so extraction falls back to messages + mock_chat_result = MagicMock() + mock_chat_result.summary = None + mock_user_proxy.initiate_chat.return_value = mock_chat_result + mock_groupchat = MagicMock() + mock_groupchat.messages = messages + mock_manager = MagicMock() + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + return gen._run_ag2(config, "Test", {}) + + def test_output_prefixed_with_ag2_header(self): + """Result always starts with '### AG2 Output ###'.""" + result = self._run_with_messages([ + {"name": "Researcher", "content": "Here are my findings. TERMINATE", "role": "assistant"} + ]) + assert result.startswith("### AG2 Output ###") + + def test_terminate_marker_stripped_from_output(self): + """TERMINATE keyword is removed from the extracted output.""" + result = self._run_with_messages([ + {"name": "Researcher", "content": "Detailed findings here. 
TERMINATE", "role": "assistant"} + ]) + assert "TERMINATE" not in result + assert "Detailed findings here" in result + + def test_user_messages_skipped_in_extraction(self): + """User proxy messages are not included in the extracted output.""" + result = self._run_with_messages([ + {"name": "User", "content": "This is the user message", "role": "user"}, + {"name": "Researcher", "content": "This is the agent response. TERMINATE", "role": "assistant"}, + ]) + assert "This is the user message" not in result + assert "This is the agent response" in result + + def test_execution_error_returns_error_message(self): + """Exception during initiate_chat returns a '### AG2 Error ###' message.""" + gen = self._make_gen() + config = _make_config() + + mock_llm_config = MagicMock() + mock_llm_config.__enter__ = MagicMock(return_value=mock_llm_config) + mock_llm_config.__exit__ = MagicMock(return_value=False) + mock_assistant = MagicMock() + mock_assistant.name = "Researcher" + mock_user_proxy = MagicMock() + mock_user_proxy.initiate_chat.side_effect = RuntimeError("Connection failed") + mock_groupchat = MagicMock() + mock_groupchat.messages = [] + mock_manager = MagicMock() + + with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ + patch("autogen.GroupChat", return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", return_value=mock_manager): + + result = gen._run_ag2(config, "Test", {}) + + assert "### AG2 Error ###" in result + assert "Connection failed" in result + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From 56f815c1b176ce39e811d8e00e0886904e8f453e Mon Sep 17 00:00:00 2001 From: Faridun Mirzoev Date: Thu, 26 Mar 2026 09:48:14 -0700 Subject: [PATCH 2/2] fix(ag2): address Qodo review issues - YAML config resolution, OPENAI_API_BASE, patch create=True - Add _resolve() helper to read LLM config from YAML 
top-level, per-role, config_list, and env vars in priority order (fixes Bedrock YAML config ignored) - Include OPENAI_API_BASE env var in base_url resolution chain - Add create=True to all patch("autogen.*") calls so tests pass without ag2 installed --- src/praisonai/praisonai/agents_generator.py | 30 +++++-- .../integration/ag2/test_ag2_integration.py | 50 +++++------ src/praisonai/tests/unit/test_ag2_adapter.py | 84 +++++++++---------- 3 files changed, 92 insertions(+), 72 deletions(-) diff --git a/src/praisonai/praisonai/agents_generator.py b/src/praisonai/praisonai/agents_generator.py index 8bafc7b50..6066b9ebf 100644 --- a/src/praisonai/praisonai/agents_generator.py +++ b/src/praisonai/praisonai/agents_generator.py @@ -446,10 +446,30 @@ def _run_ag2(self, config, topic, tools_dict): ) model_config = self.config_list[0] if self.config_list else {} - api_type = model_config.get("api_type", "openai").lower() - model_name = model_config.get("model", "gpt-4o-mini") - api_key = model_config.get("api_key") or os.environ.get("OPENAI_API_KEY") - base_url = model_config.get("base_url") or os.environ.get("OPENAI_BASE_URL") + + # Allow YAML top-level llm block to override config_list values + yaml_llm = config.get("llm", {}) or {} + # Also check first role's llm block as a fallback + first_role_llm = {} + for role_details in config.get("roles", {}).values(): + first_role_llm = role_details.get("llm", {}) or {} + break + + # Priority: YAML top-level llm > first role llm > config_list > env vars + def _resolve(key, env_var=None, default=None): + return (yaml_llm.get(key) or first_role_llm.get(key) + or model_config.get(key) + or (os.environ.get(env_var) if env_var else None) + or default) + + api_type = _resolve("api_type", default="openai").lower() + model_name = _resolve("model", default="gpt-4o-mini") + api_key = _resolve("api_key", env_var="OPENAI_API_KEY") + # Fix #3: also check OPENAI_API_BASE for consistency with rest of codebase + base_url = 
(yaml_llm.get("base_url") + or first_role_llm.get("base_url") or model_config.get("base_url") + or os.environ.get("OPENAI_BASE_URL") + or os.environ.get("OPENAI_API_BASE")) # Build LLMConfig — pass a config dict; Bedrock needs no api_key if api_type == "bedrock": @@ -458,7 +478,7 @@ def _run_ag2(self, config, topic, tools_dict): llm_config_entry = {"model": model_name} if api_key: llm_config_entry["api_key"] = api_key - if base_url and base_url != "https://api.openai.com/v1": + if base_url and base_url not in ("https://api.openai.com/v1", "https://api.openai.com/v1/"): llm_config_entry["base_url"] = base_url llm_config = LLMConfig(llm_config_entry) diff --git a/src/praisonai/tests/integration/ag2/test_ag2_integration.py b/src/praisonai/tests/integration/ag2/test_ag2_integration.py index 1e63cafaa..7f1b444cb 100644 --- a/src/praisonai/tests/integration/ag2/test_ag2_integration.py +++ b/src/praisonai/tests/integration/ag2/test_ag2_integration.py @@ -208,11 +208,11 @@ def test_single_agent_run_returns_ag2_output(self, mock_ag2_classes): config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], ) - with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ - patch("autogen.AssistantAgent", return_value=m["assistant"]), \ - patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ - patch("autogen.GroupChat", return_value=m["groupchat"]), \ - patch("autogen.GroupChatManager", return_value=m["manager"]): + with patch("autogen.LLMConfig", create=True, return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", create=True, return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", create=True, return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", create=True, return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", create=True, return_value=m["manager"]): result = gen._run_ag2(config, "Write a poem", {}) @@ -254,11 +254,11 @@ def test_single_agent_calls_initiate_chat(self, mock_ag2_classes): config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], ) -
with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ - patch("autogen.AssistantAgent", return_value=m["assistant"]), \ - patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ - patch("autogen.GroupChat", return_value=m["groupchat"]), \ - patch("autogen.GroupChatManager", return_value=m["manager"]): + with patch("autogen.LLMConfig", create=True, return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", create=True, return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", create=True, return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", create=True, return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", create=True, return_value=m["manager"]): gen._run_ag2(config, "Test topic", {}) @@ -321,11 +321,11 @@ def count_assistant(**kwargs): config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], ) - with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ - patch("autogen.AssistantAgent", side_effect=count_assistant), \ - patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ - patch("autogen.GroupChat", return_value=m["groupchat"]), \ - patch("autogen.GroupChatManager", return_value=m["manager"]): + with patch("autogen.LLMConfig", create=True, return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", create=True, side_effect=count_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", create=True, return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", create=True, return_value=m["manager"]): gen._run_ag2(config, "Explain open-source AI", {}) @@ -368,11 +368,11 @@ def capture_groupchat(**kwargs): config_list=[{"model": "gpt-4o-mini", "api_key": "sk-test"}], ) - with patch("autogen.LLMConfig", return_value=m["llm_config"]), \ - patch("autogen.AssistantAgent", return_value=m["assistant"]), \ - patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ - patch("autogen.GroupChat", 
side_effect=capture_groupchat), \ - patch("autogen.GroupChatManager", return_value=m["manager"]): + with patch("autogen.LLMConfig", create=True, return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", create=True, return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", create=True, return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", create=True, side_effect=capture_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=m["manager"]): gen._run_ag2(config, "Test", {}) @@ -461,11 +461,11 @@ def test_ag2_framework_dispatches_to_run_ag2(self, mock_ag2_classes): }) with patch.object(gen, "_run_autogen") as mock_autogen, \ - patch("autogen.LLMConfig", return_value=m["llm_config"]), \ - patch("autogen.AssistantAgent", return_value=m["assistant"]), \ - patch("autogen.UserProxyAgent", return_value=m["user_proxy"]), \ - patch("autogen.GroupChat", return_value=m["groupchat"]), \ - patch("autogen.GroupChatManager", return_value=m["manager"]): + patch("autogen.LLMConfig", create=True, return_value=m["llm_config"]), \ + patch("autogen.AssistantAgent", create=True, return_value=m["assistant"]), \ + patch("autogen.UserProxyAgent", create=True, return_value=m["user_proxy"]), \ + patch("autogen.GroupChat", create=True, return_value=m["groupchat"]), \ + patch("autogen.GroupChatManager", create=True, return_value=m["manager"]): result = gen.generate_crew_and_kickoff() diff --git a/src/praisonai/tests/unit/test_ag2_adapter.py b/src/praisonai/tests/unit/test_ag2_adapter.py index 7e04a89a2..51c22873e 100644 --- a/src/praisonai/tests/unit/test_ag2_adapter.py +++ b/src/praisonai/tests/unit/test_ag2_adapter.py @@ -178,11 +178,11 @@ def test_openai_llm_config_constructed(self): mock_manager = MagicMock() with patch("praisonai.agents_generator.AG2_AVAILABLE", True), \ - patch("autogen.LLMConfig", return_value=mock_llm_config) as mock_llmcfg, \ - patch("autogen.AssistantAgent", return_value=mock_assistant), \ - patch("autogen.UserProxyAgent", 
return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + patch("autogen.LLMConfig", create=True, return_value=mock_llm_config) as mock_llmcfg, \ + patch("autogen.AssistantAgent", create=True, return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): config = _make_config() gen._run_ag2(config, "Test topic", {}) @@ -213,11 +213,11 @@ def test_bedrock_llm_config_constructed(self): mock_groupchat.messages = [{"name": "Agent", "content": "Report ready. TERMINATE", "role": "assistant"}] mock_manager = MagicMock() - with patch("autogen.LLMConfig", return_value=mock_llm_config) as mock_llmcfg, \ - patch("autogen.AssistantAgent", return_value=mock_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config) as mock_llmcfg, \ + patch("autogen.AssistantAgent", create=True, return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): config = _make_config() gen._run_ag2(config, "AWS deployment", {}) @@ -280,11 +280,11 @@ def fake_assistant(**kwargs): mock_groupchat.messages = [{"name": "Agent A", "content": "Done. 
TERMINATE", "role": "assistant"}] mock_manager = MagicMock() - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ - patch("autogen.AssistantAgent", side_effect=fake_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", create=True, side_effect=fake_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): gen._run_ag2(config, "Test", {}) @@ -313,11 +313,11 @@ def capture_groupchat(**kwargs): groupchat_call_args.update(kwargs) return mock_groupchat - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ - patch("autogen.AssistantAgent", return_value=mock_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", side_effect=capture_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", create=True, return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, side_effect=capture_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): gen._run_ag2(config, "Test", {}) @@ -335,9 +335,9 @@ def test_empty_roles_returns_no_agents_message(self): mock_llm_config.__exit__ = MagicMock(return_value=False) mock_user_proxy = MagicMock() - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ 
patch("autogen.AssistantAgent"), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy): + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy): result = gen._run_ag2(config, "Test", {}) @@ -391,11 +391,11 @@ def capture_assistant(**kwargs): m.name = kwargs.get("name", "agent") return m - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ - patch("autogen.AssistantAgent", side_effect=capture_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", create=True, side_effect=capture_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): gen._run_ag2(config, "Test", {}) @@ -430,11 +430,11 @@ def capture_assistant(**kwargs): m.name = created_name["name"] return m - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ - patch("autogen.AssistantAgent", side_effect=capture_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", create=True, side_effect=capture_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): gen._run_ag2(config, "Test", {}) @@ -478,11 +478,11 @@ def _run_with_messages(self, messages): 
mock_groupchat.messages = messages mock_manager = MagicMock() - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ - patch("autogen.AssistantAgent", return_value=mock_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", create=True, return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): return gen._run_ag2(config, "Test", {}) @@ -526,11 +526,11 @@ def test_execution_error_returns_error_message(self): mock_groupchat.messages = [] mock_manager = MagicMock() - with patch("autogen.LLMConfig", return_value=mock_llm_config), \ - patch("autogen.AssistantAgent", return_value=mock_assistant), \ - patch("autogen.UserProxyAgent", return_value=mock_user_proxy), \ - patch("autogen.GroupChat", return_value=mock_groupchat), \ - patch("autogen.GroupChatManager", return_value=mock_manager): + with patch("autogen.LLMConfig", create=True, return_value=mock_llm_config), \ + patch("autogen.AssistantAgent", create=True, return_value=mock_assistant), \ + patch("autogen.UserProxyAgent", create=True, return_value=mock_user_proxy), \ + patch("autogen.GroupChat", create=True, return_value=mock_groupchat), \ + patch("autogen.GroupChatManager", create=True, return_value=mock_manager): result = gen._run_ag2(config, "Test", {})