Skip to content

Commit 97feb97

Browse files
committed
Enhance AG2 Async Agent functionality and documentation
1 parent b69634f commit 97feb97

8 files changed

Lines changed: 678 additions & 465 deletions

File tree

agentops/instrumentation/agentic/ag2/instrumentor.py

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -56,12 +56,10 @@ def _create_metrics(self, meter: Meter) -> Dict[str, Any]:
5656

5757
def _initialize(self, **kwargs):
5858
"""Initialize attribute manager and AG2-specific concurrent.futures instrumentation."""
59-
logger.debug("[AG2 DEBUG] Initializing AG2 instrumentor...")
6059
self._attribute_manager = SpanAttributeManager(service_name="agentops", deployment_environment="production")
6160

6261
def _custom_wrap(self, **kwargs):
6362
"""Perform custom wrapping for AG2 methods."""
64-
logger.debug("[AG2 DEBUG] Starting custom wrapping for AG2 methods...")
6563

6664
methods_to_wrap = [
6765
("autogen.agentchat.conversable_agent", "ConversableAgent.__init__", self._agent_init_wrapper),
@@ -96,11 +94,9 @@ def _custom_wrap(self, **kwargs):
9694

9795
for module, method, wrapper_factory in methods_to_wrap:
9896
try:
99-
logger.debug(f"[AG2 DEBUG] Attempting to wrap: {module}.{method}")
10097
wrap_function_wrapper(module, method, wrapper_factory(self._tracer))
101-
logger.debug(f"[AG2 DEBUG] Successfully wrapped {method}")
10298
except (AttributeError, ModuleNotFoundError) as e:
103-
logger.debug(f"[AG2 DEBUG] Failed to wrap {method}: {e}")
99+
logger.debug(f"Failed to wrap {method}: {e}")
104100

105101
def _custom_unwrap(self, **kwargs):
106102
"""Remove instrumentation from AG2."""
@@ -609,7 +605,6 @@ def context_aware_start(self):
609605
try:
610606
threading.Thread.__init__ = original_thread_init
611607
threading.Thread.start = original_thread_start
612-
logger.debug("[AG2 DEBUG] Successfully restored original Thread methods")
613608
except Exception as e:
614609
logger.error(f"[AG2 DEBUG] Error restoring Thread methods: {e}")
615610
# Force restore
@@ -668,7 +663,6 @@ def wrapper(wrapped, instance, args, kwargs):
668663

669664
result = wrapped(*args, **kwargs)
670665
self._capture_group_chat_summary(span, instance, result)
671-
logger.debug(f"[AG2 DEBUG] Group chat run completed - Result: {result}")
672666
return result
673667

674668
return wrapper
@@ -677,7 +671,7 @@ def _tool_execution_wrapper(self, tracer, tool_type):
677671
"""Wrapper for capturing tool execution."""
678672

679673
def wrapper(wrapped, instance, args, kwargs):
680-
span_name = f"ag2.tool.{tool_type}"
674+
span_name = f"ag2.tool.{tool_type}.tool_usage"
681675

682676
with create_span(
683677
tracer, span_name, kind=SpanKind.CLIENT, attribute_manager=self._attribute_manager

docs/v2/examples/ag2.mdx

Lines changed: 141 additions & 132 deletions
Original file line numberDiff line numberDiff line change
@@ -1,176 +1,185 @@
11
---
2-
title: 'AG2 with Mem0 Example'
3-
description: 'Observe an AG2 Agent with memory powered by Mem0 using AgentOps'
2+
title: 'AG2'
3+
description: 'AG2 Async Agent Chat'
44
---
5-
{/* SOURCE_FILE: examples/ag2/agentchat_with_memory.ipynb */}
5+
{/* SOURCE_FILE: examples/ag2/async_human_input.ipynb */}
66

7-
_View Notebook on <a href={'https://github.com/AgentOps-AI/agentops/blob/main/examples/ag2/agentchat_with_memory.ipynb'} target={'_blank'}>Github</a>_
7+
_View Notebook on <a href={'https://github.com/AgentOps-AI/agentops/blob/main/examples/ag2/ag2_async_agent.ipynb'} target={'_blank'}>Github</a>_
88

9-
# Observe an Agent with memory powered by Mem0
9+
# AG2 Async Agent Chat with Automated Responses
1010

11-
This notebook demonstrates an intelligent customer service chatbot system that combines:
11+
This notebook demonstrates how to leverage asynchronous programming with AG2 agents
12+
to create automated conversations between AI agents, eliminating the need for human
13+
input while maintaining full traceability.
1214

13-
- AG2 for conversational agents
14-
- Mem0 for memory management
15+
## Overview
16+
This notebook demonstrates a practical example of automated AI-to-AI communication where we:
1517

16-
[Mem0](https://www.mem0.ai/) provides a smart, self-improving memory layer for Large Language Models (LLMs), enabling developers to create personalized AI experiences that evolve with each user interaction. Refer [docs](https://docs.mem0.ai/overview) for more information.
18+
1. Initialize AG2 agents with OpenAI's GPT-4o-mini model
19+
2. Create custom async agents that simulate human-like responses and processing delays
20+
3. Automate the entire conversation flow without requiring manual intervention
21+
4. Track all interactions using AgentOps for monitoring and analysis
1722

18-
The implementation showcases how to initialize agents, manage conversation memory, and facilitate multi-agent conversations for enhanced problem-solving in customer support scenarios.
19-
20-
With AgentOps, you can observe the agent's memory and interactions in real-time, providing insights into how the agent learns and adapts over time.
21-
22-
## Pre-requisites
23-
- AgentOps API key from [AgentOps](https://app.agentops.ai/).
24-
- Mem0 API key from [Mem0 Platform](https://app.mem0.ai/).
25-
- OpenAI API key from [OpenAI](https://platform.openai.com/).
23+
By using async operations and automated responses, you can create fully autonomous
24+
agent conversations that simulate real-world scenarios. This is particularly useful
25+
for testing, prototyping, and creating demos where you want to showcase agent
26+
capabilities without manual input.
2627

2728
## Installation
28-
29-
Install required dependencies:
3029
<CodeGroup>
3130
```bash pip
32-
pip install agentops "ag2[openai]" mem0ai python-dotenv
31+
pip install ag2 agentops nest-asyncio
3332
```
3433
```bash poetry
35-
poetry add agentops ag2 mem0ai python-dotenv
36-
# Note: For ag2[openai] with poetry, you might need to specify openai as an extra or directly.
37-
# poetry add ag2 -E openai
34+
poetry add ag2 agentops nest-asyncio
3835
```
3936
```bash uv
40-
uv add agentops "ag2[openai]" mem0ai python-dotenv
37+
uv add ag2 agentops nest-asyncio
4138
```
4239
</CodeGroup>
4340

44-
## Setup
45-
46-
```python
41+
```
42+
import asyncio
43+
from typing import Dict, Optional, Union
4744
import os
4845
from dotenv import load_dotenv
46+
import nest_asyncio
4947
import agentops
50-
from mem0 import MemoryClient
51-
from autogen import ConversableAgent
48+
from autogen import AssistantAgent
49+
from autogen.agentchat.user_proxy_agent import UserProxyAgent
50+
```
51+
5252

53+
```
54+
# Load environment variables for API keys
5355
load_dotenv()
54-
os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_agentops_api_key_here")
56+
os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here")
5557
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")
56-
os.environ["MEM0_API_KEY"] = os.getenv("MEM0_API_KEY", "your_mem0_api_key_here")
58+
# Initialize AgentOps for tracking and monitoring
59+
agentops.init(auto_start_session=False, trace_name="AG2 Async Demo")
60+
tracer = agentops.start_trace(trace_name="AG2 Async Agent Demo", tags=["ag2-async-demo", "agentops-example"])
5761
```
5862

59-
## Initialize Agent and Memory
60-
61-
The conversational agent is set up using the 'gpt-4o' model and a mem0 client. We'll utilize the client's methods for storing and accessing memories.
62-
63-
```python
64-
agentops.init(auto_start_session=False)
65-
tracer = agentops.start_trace(trace_name="AG2 Agent using Mem0", tags=["ag2-mem0-example", "agentops-example"])
6663

67-
agent = ConversableAgent(
68-
"chatbot",
69-
llm_config={"config_list": [{"model": "gpt-4o", "api_key": os.environ.get("OPENAI_API_KEY")}]},
70-
code_execution_config=False,
71-
function_map=None,
72-
human_input_mode="NEVER",
73-
)
74-
75-
memory = MemoryClient()
7664
```
77-
78-
Initialize a conversation history for a Best Buy customer service chatbot. It contains a list of message exchanges between the user and the assistant, structured as dictionaries with 'role' and 'content' keys. The entire conversation is then stored in memory using the `memory.add()` method, associated with the identifier "customer_service_bot".
79-
80-
```python
81-
conversation = [
82-
{
83-
"role": "assistant",
84-
"content": "Hi, I'm Best Buy's chatbot!\n\nThanks for being a My Best Buy TotalTM member.\n\nWhat can I help you with?",
85-
},
86-
{
87-
"role": "user",
88-
"content": 'Seeing horizontal lines on our tv. TV model: Sony - 77" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV',
89-
},
90-
{
91-
"role": "assistant",
92-
"content": "Thanks for being a My Best Buy Total™ member. I can connect you to an expert immediately - just one perk of your membership!\n\nSelect the button below when you're ready to chat.",
93-
},
94-
{
95-
"role": "assistant",
96-
"content": "Good evening, thank you for choosing Best Buy, Fnu. My name is Lovely. I hope you are doing well. I'm sorry to hear that you're seeing horizontal lines on your TV.\n\nI'm absolutely committed to exploring all possible ways to assist you to fix this issue.\n\nTo ensure that we are on the right account, may I please have your email address registered with your Best Buy account?",
97-
},
98-
{"role": "user", "content": "dd@gmail.com"},
99-
{
100-
"role": "assistant",
101-
"content": "Perfect! Thank you for providing all the details, surely you have made my job easier by doing this. I really appreciate it.\n\nI also want to take a moment to express our heartfelt appreciation for your trust and loyalty. Thank you for being an amazing customer of BestBuy Total.\n\nCould you please help me with the order number or product's details to check it quickly?\n\nSamsung - 49\\" Odyssey OLED G9 (G95SC) DQHD 240Hz 0.03ms G-Sync Compatible Curved Smart Gaming Monitor - Silver - just to confirm this is the item, right?",
102-
},
103-
{"role": "user", "content": "Order number: 112217629"},
104-
{
105-
"role": "assistant",
106-
"content": "Superb! Thank you for confirmation.\n\nThank you for your patience. After exploring all possible solutions, I can help you to arrange a home repair appointment for your device. Our Geek Squad experts will visit your home to inspect and fix your device.\n\nIt's great that you have a protection plan - rest assured, we've got your back! As a valued Total member, you can avail this service at a minimal service fee. This fee, applicable to all repairs, covers the cost of diagnosing the issue and any small parts needed for the repair. It's part of our 24-month free protection plan.\n\nPlease click here to review the service fee and plan coverage details -\n\nhttps://www.bestbuy.com/site/best-buy-membership/best-buy-protection/pcmcat1608643232014.c?id=pcmcat1608643232014#jl-servicefees\n\nFnu - just to confirm shall I proceed to schedule the appointment?",
107-
},
108-
{"role": "user", "content": "Yes please"},
109-
{"role": "assistant", "content": "When should I schedule the appointment?"},
110-
{"role": "user", "content": "Schedule it for tomorrow please"},
111-
]
112-
113-
memory.add(messages=conversation, user_id="customer_service_bot")
65+
# Define an asynchronous function that simulates async processing
66+
async def simulate_async_processing(task_name: str, delay: float = 1.0) -> str:
67+
"""
68+
Simulate some asynchronous processing (e.g., API calls, file operations, etc.)
69+
"""
70+
print(f"🔄 Starting async task: {task_name}")
71+
await asyncio.sleep(delay) # Simulate async work
72+
print(f"✅ Completed async task: {task_name}")
73+
return f"Processed: {task_name}"
11474
```
11575

116-
## Agent Inference
117-
118-
We ask a question to the agent, utilizing mem0 to retrieve relevant memories. The agent then formulates a response based on both the question and the retrieved contextual information.
11976

120-
```python
121-
data = "I forgot the order number, can you quickly tell me?"
122-
123-
relevant_memories = memory.search(data, user_id="customer_service_bot")
124-
flatten_relevant_memories = "\n".join([m["memory"] for m in relevant_memories])
125-
126-
prompt = f\"\"\"Answer the user question considering the memories. Keep answers clear and concise.
127-
Memories:
128-
{flatten_relevant_memories}
129-
\n\n
130-
Question: {data}
131-
\"\"\"
132-
133-
reply = agent.generate_reply(messages=[{"content": prompt, "role": "user"}])
134-
print(reply)
77+
```
78+
# Define a custom UserProxyAgent that simulates automated user responses
79+
class AutomatedUserProxyAgent(UserProxyAgent):
80+
def __init__(self, name: str, **kwargs):
81+
super().__init__(name, **kwargs)
82+
self.response_count = 0
83+
self.predefined_responses = [
84+
"Yes, please generate interview questions for these topics.",
85+
"The questions look good. Can you make them more specific to senior-level positions?",
86+
"Perfect! These questions are exactly what we need. Thank you!",
87+
]
88+
89+
async def a_get_human_input(self, prompt: str) -> str:
90+
# Simulate async processing before responding
91+
await simulate_async_processing(f"Processing user input #{self.response_count + 1}")
92+
93+
if self.response_count < len(self.predefined_responses):
94+
response = self.predefined_responses[self.response_count]
95+
self.response_count += 1
96+
print(f"👤 User: {response}")
97+
return response
98+
else:
99+
print("👤 User: TERMINATE")
100+
return "TERMINATE"
101+
102+
async def a_receive(
103+
self,
104+
message: Union[Dict, str],
105+
sender,
106+
request_reply: Optional[bool] = None,
107+
silent: Optional[bool] = False,
108+
):
109+
await super().a_receive(message, sender, request_reply, silent)
135110
```
136111

137-
## Multi Agent Conversation
138-
139-
Initialize two AI agents: a "manager" for resolving customer issues and a "customer_bot" for gathering information on customer problems, both using GPT-4. It then retrieves relevant memories for a given question, combining them with the question into a prompt. This prompt can be used by either the manager or customer_bot to generate a contextually informed response.
140-
141-
```python
142-
manager = ConversableAgent(
143-
"manager",
144-
system_message="You are a manager who helps in resolving customer issues.",
145-
llm_config={"config_list": [{"model": "gpt-4o-mini", "api_key": os.environ.get("OPENAI_API_KEY")}]},
146-
human_input_mode="NEVER",
147-
)
148-
149-
customer_bot = ConversableAgent(
150-
"customer_bot",
151-
system_message="You are a customer service bot who gathers information on issues customers are facing. Keep answers clear and concise.",
152-
llm_config={"config_list": [{"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}]},
153-
human_input_mode="NEVER",
154-
)
155112

156-
data = "When is the appointment?"
113+
```
114+
# Define an AssistantAgent that simulates async processing before responding
115+
class AsyncAssistantAgent(AssistantAgent):
116+
async def a_receive(
117+
self,
118+
message: Union[Dict, str],
119+
sender,
120+
request_reply: Optional[bool] = None,
121+
silent: Optional[bool] = False,
122+
):
123+
# Simulate async processing before responding
124+
await simulate_async_processing("Analyzing request and preparing response", 0.5)
125+
await super().a_receive(message, sender, request_reply, silent)
126+
```
157127

158-
relevant_memories = memory.search(data, user_id="customer_service_bot")
159-
flatten_relevant_memories = "\n".join([m["memory"] for m in relevant_memories])
160128

161-
prompt = f\"\"\"
162-
Context:
163-
{flatten_relevant_memories}
164-
\n\n
165-
Question: {data}
166-
\"\"\"
129+
```
130+
async def main():
131+
print("🚀 Starting AG2 Async Demo")
132+
133+
# Create agents with automated behavior
134+
user_proxy = AutomatedUserProxyAgent(
135+
name="hiring_manager",
136+
human_input_mode="NEVER", # No human input required
137+
max_consecutive_auto_reply=3,
138+
code_execution_config=False,
139+
is_termination_msg=lambda msg: "TERMINATE" in str(msg.get("content", "")),
140+
)
141+
142+
assistant = AsyncAssistantAgent(
143+
name="interview_consultant",
144+
system_message="""You are an expert interview consultant. When given interview topics,
145+
you create thoughtful, relevant questions. You ask for feedback and incorporate it.
146+
When the user is satisfied with the questions, end with 'TERMINATE'.""",
147+
llm_config={"config_list": [{"model": "gpt-4o-mini", "api_key": os.environ.get("OPENAI_API_KEY")}]},
148+
is_termination_msg=lambda msg: "TERMINATE" in str(msg.get("content", "")),
149+
)
150+
151+
try:
152+
print("🤖 Initiating automated conversation...")
153+
# Start the automated chat between the user and assistant
154+
await user_proxy.a_initiate_chat(
155+
assistant,
156+
message="""I need help creating interview questions for these topics:
157+
- Resume Review
158+
- Technical Skills Assessment
159+
- Project Discussion
160+
- Job Role Expectations
161+
- Closing Remarks
162+
163+
Please create 2-3 questions for each topic.""",
164+
max_turns=6,
165+
)
166+
except Exception as e:
167+
print(f"\n❌ Error occurred: {e}")
168+
finally:
169+
agentops.end_trace(tracer, end_state="Success")
170+
171+
print("\n🎉 Demo completed successfully!")
172+
```
167173

168-
result = manager.send(prompt, customer_bot, request_reply=True)
169174

170-
agentops.end_trace(tracer, end_state="Success")
171175
```
176+
# Run the main async demo
177+
nest_asyncio.apply()
178+
asyncio.run(main())
179+
```
180+
172181

173182
<script type="module" src="/scripts/github_stars.js"></script>
174183
<script type="module" src="/scripts/scroll-img-fadein-animation.js"></script>
175184
<script type="module" src="/scripts/button_heartbeat_animation.js"></script>
176-
<script type="module" src="/scripts/adjust_api_dynamically.js"></script>
185+
<script type="module" src="/scripts/adjust_api_dynamically.js"></script>

0 commit comments

Comments (0)