Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 2 additions & 7 deletions .github/workflows/examples-integration-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,6 @@ jobs:
- { path: 'examples/ag2/async_human_input.py', name: 'AG2 Async Human Input' }
- { path: 'examples/ag2/tools_wikipedia_search.py', name: 'AG2 Wikipedia Search' }

# Context Manager examples
- { path: 'examples/context_manager/basic_usage.py', name: 'Context Manager Basic' }
- { path: 'examples/context_manager/error_handling.py', name: 'Context Manager Errors' }
- { path: 'examples/context_manager/parallel_traces.py', name: 'Context Manager Parallel' }
- { path: 'examples/context_manager/production_patterns.py', name: 'Context Manager Production' }

# Agno examples
- { path: 'examples/agno/agno_async_operations.py', name: 'Agno Async Operations' }
- { path: 'examples/agno/agno_basic_agents.py', name: 'Agno Basic Agents' }
Expand All @@ -84,7 +78,7 @@ jobs:
- { path: 'examples/google_adk/human_approval.py', name: 'Google ADK Human Approval' }

# LlamaIndex examples
- { path: 'examples/llamaindex/llamaindex_example.py', name: 'LlamaIndex' }
# - { path: 'examples/llamaindex/llamaindex_example.py', name: 'LlamaIndex' }

# Mem0 examples
- { path: 'examples/mem0/mem0_memoryclient_example.py', name: 'Mem0 Memory Client' }
Expand Down Expand Up @@ -157,6 +151,7 @@ jobs:
LLAMA_API_KEY: ${{ secrets.LLAMA_API_KEY }}
PERPLEXITY_API_KEY: ${{ secrets.PERPLEXITY_API_KEY }}
REPLICATE_API_TOKEN: ${{ secrets.REPLICATE_API_TOKEN }}
PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
PYTHONPATH: ${{ github.workspace }}
run: |
echo "Running ${{ matrix.example.name }}..."
Expand Down
145 changes: 89 additions & 56 deletions examples/ag2/async_human_input.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
# Agent Chat with Async Human Inputs
# Agent Chat with Async Operations
#
# We are going to create an agent that can chat with a human asynchronously. The agent will be able to respond to messages from the human and will also be able to send messages to the human.
# We are going to create agents that can perform asynchronous operations and chat with each other.
# This example demonstrates async capabilities without requiring human input.
#
# We are going to use AgentOps to monitor the agent's performance and observe its interactions with the human.
# We are going to use AgentOps to monitor the agent's performance and observe their interactions.
# # Install required dependencies
# %pip install agentops
# %pip install ag2
Expand All @@ -25,92 +26,124 @@
os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here")
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")

agentops.init(auto_start_session=False, trace_name="AG2 Async Human Input")
tracer = agentops.start_trace(
trace_name="AG2 Agent chat with Async Human Inputs", tags=["ag2-chat-async-human-inputs", "agentops-example"]
)
agentops.init(auto_start_session=False, trace_name="AG2 Async Demo")
tracer = agentops.start_trace(trace_name="AG2 Async Agent Demo", tags=["ag2-async-demo", "agentops-example"])


# Define an asynchronous function that simulates some asynchronous task (e.g., I/O operation)
async def my_asynchronous_function():
print("Start asynchronous function")
await asyncio.sleep(2) # Simulate some asynchronous task (e.g., I/O operation)
print("End asynchronous function")
return "input"
# Helper coroutine standing in for real async work (API calls, file I/O, ...)
async def simulate_async_processing(task_name: str, delay: float = 1.0) -> str:
    """Pretend to perform asynchronous work for *task_name*.

    Sleeps for *delay* seconds to mimic I/O latency, printing a start and a
    completion line, and returns a short status string for the task.
    """
    start_line = f"🔄 Starting async task: {task_name}"
    done_line = f"✅ Completed async task: {task_name}"
    print(start_line)
    await asyncio.sleep(delay)  # stand-in for awaiting a real I/O operation
    print(done_line)
    return f"Processed: {task_name}"


# Define a custom class `CustomisedUserProxyAgent` that extends `UserProxyAgent`
class CustomisedUserProxyAgent(UserProxyAgent):
# Asynchronous function to get human input
# Define a custom UserProxyAgent that simulates automated responses
class AutomatedUserProxyAgent(UserProxyAgent):
def __init__(self, name: str, **kwargs):
super().__init__(name, **kwargs)
self.response_count = 0
self.predefined_responses = [
"Yes, please generate interview questions for these topics.",
"The questions look good. Can you make them more specific to senior-level positions?",
"Perfect! These questions are exactly what we need. Thank you!",
]

async def a_get_human_input(self, prompt: str) -> str:
# Call the asynchronous function to get user input asynchronously
user_input = await my_asynchronous_function()
return user_input
# Simulate async processing before responding
await simulate_async_processing(f"Processing user input #{self.response_count + 1}")

if self.response_count < len(self.predefined_responses):
response = self.predefined_responses[self.response_count]
self.response_count += 1
print(f"👤 User: {response}")
return response
else:
print("👤 User: TERMINATE")
return "TERMINATE"

# Asynchronous function to receive a message
async def a_receive(
self,
message: Union[Dict, str],
sender,
request_reply: Optional[bool] = None,
silent: Optional[bool] = False,
):
# Call the superclass method to handle message reception asynchronously
await super().a_receive(message, sender, request_reply, silent)


class CustomisedAssistantAgent(AssistantAgent):
# Asynchronous function to get human input
async def a_get_human_input(self, prompt: str) -> str:
# Call the asynchronous function to get user input asynchronously
user_input = await my_asynchronous_function()
return user_input

# Asynchronous function to receive a message
class AsyncAssistantAgent(AssistantAgent):
    """AssistantAgent variant that runs a simulated async processing step
    before delegating each incoming message to the base-class handler."""

    async def a_receive(
        self,
        message: Union[Dict, str],
        sender,
        request_reply: Optional[bool] = None,
        silent: Optional[bool] = False,
    ):
        # Simulate async processing before responding
        # (fixed 0.5s stand-in delay; the real handling happens in super())
        await simulate_async_processing("Analyzing request and preparing response", 0.5)
        await super().a_receive(message, sender, request_reply, silent)


nest_asyncio.apply()


async def main():
boss = CustomisedUserProxyAgent(
name="boss",
human_input_mode="ALWAYS",
max_consecutive_auto_reply=0,
print("🚀 Starting AG2 Async Demo")
print("=" * 50)

# Create agents with automated behavior
user_proxy = AutomatedUserProxyAgent(
name="hiring_manager",
human_input_mode="NEVER", # No human input required
max_consecutive_auto_reply=3,
code_execution_config=False,
is_termination_msg=lambda msg: "TERMINATE" in str(msg.get("content", "")),
)

assistant = CustomisedAssistantAgent(
name="assistant",
system_message="You will provide some agenda, and I will create questions for an interview meeting. Every time when you generate question then you have to ask user for feedback and if user provides the feedback then you have to incorporate that feedback and generate new set of questions and if user don't want to update then terminate the process and exit",
assistant = AsyncAssistantAgent(
name="interview_consultant",
system_message="""You are an expert interview consultant. When given interview topics,
you create thoughtful, relevant questions. You ask for feedback and incorporate it.
When the user is satisfied with the questions, end with 'TERMINATE'.""",
llm_config={"config_list": [{"model": "gpt-4o-mini", "api_key": os.environ.get("OPENAI_API_KEY")}]},
is_termination_msg=lambda msg: "TERMINATE" in str(msg.get("content", "")),
)

await boss.a_initiate_chat(
assistant,
message="Resume Review, Technical Skills Assessment, Project Discussion, Job Role Expectations, Closing Remarks.",
n_results=3,
)


# await main()
agentops.end_trace(tracer, end_state="Success")

# Let's check programmatically that spans were recorded in AgentOps
print("\n" + "=" * 50)
print("Now let's verify that our LLM calls were tracked properly...")
try:
agentops.validate_trace_spans(trace_context=tracer)
print("\n✅ Success! All LLM spans were properly recorded in AgentOps.")
except agentops.ValidationError as e:
print(f"\n❌ Error validating spans: {e}")
raise
try:
print("🤖 Initiating automated conversation...")
await user_proxy.a_initiate_chat(
assistant,
message="""I need help creating interview questions for these topics:
- Resume Review
- Technical Skills Assessment
- Project Discussion
- Job Role Expectations
- Closing Remarks

Please create 2-3 questions for each topic.""",
max_turns=6,
)
except Exception as e:
print(f"\n❌ Error occurred: {e}")
finally:
agentops.end_trace(tracer, end_state="Success")

# Validate AgentOps tracking
print("\n" + "=" * 50)
print("🔍 Validating AgentOps tracking...")
try:
agentops.validate_trace_spans(trace_context=tracer)
print("✅ Success! All LLM spans were properly recorded in AgentOps.")
except agentops.ValidationError as e:
print(f"❌ Error validating spans: {e}")
raise

print("\n🎉 Demo completed successfully!")


if __name__ == "__main__":
asyncio.run(main())
3 changes: 2 additions & 1 deletion examples/ag2/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
ag2
nest-asyncio
wikipedia-api
wikipedia-api
ag2[openai]
3 changes: 1 addition & 2 deletions examples/agno/agno_workflow_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
"""

from agno.agent import Agent, RunResponse
import asyncio
import agentops
from dotenv import load_dotenv
from agno.workflow import Workflow
Expand Down Expand Up @@ -124,4 +123,4 @@ def demonstrate_workflows():
raise


asyncio.run(demonstrate_workflows())
demonstrate_workflows()
8 changes: 7 additions & 1 deletion examples/agno/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,8 @@
agno
aiohttp
aiohttp
openai
googlesearch-python
pycountry
arxiv
pypdf
duckduckgo-search
49 changes: 33 additions & 16 deletions examples/autogen/AgentChat.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# Microsoft Autogen Chat Example
# Microsoft Autogen Multi-Agent Collaboration Example
#
# This example demonstrates AI-to-AI collaboration using multiple specialized agents working together without human interaction.
# AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps dashboard right away.
# First let's install the required packages
# %pip install -U autogen-agentchat
Expand All @@ -13,7 +14,7 @@

import agentops

from autogen_agentchat.agents import AssistantAgent, UserProxyAgent
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient

from autogen_agentchat.teams import RoundRobinGroupChat
Expand All @@ -32,9 +33,10 @@
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")

# When initializing AgentOps, you can pass in optional tags to help filter sessions
agentops.init(auto_start_session=False, trace_name="Autogen Agent Chat Example")
agentops.init(auto_start_session=False, trace_name="Autogen Multi-Agent Collaboration Example")
tracer = agentops.start_trace(
trace_name="Microsoft Agent Chat Example", tags=["autogen-chat", "microsoft-autogen", "agentops-example"]
trace_name="Microsoft Multi-Agent Collaboration Example",
tags=["autogen-collaboration", "microsoft-autogen", "agentops-example"],
)

# AutoGen will now start automatically tracking
Expand All @@ -45,38 +47,53 @@
# * Correspondence between agents
# * Tool usage
# * Errors
# # Simple Chat Example
# # Multi-Agent Collaboration Example
# Define model and API key
model_name = "gpt-4o-mini" # Or "gpt-4o" / "gpt-4o-mini" as per migration guide examples
api_key = os.getenv("OPENAI_API_KEY")

# Create the model client
model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key)

# Create the agent that uses the LLM.
assistant = AssistantAgent(
name="assistant",
system_message="You are a helpful assistant.", # Added system message for clarity
# Create multiple AI agents with different roles
research_agent = AssistantAgent(
name="research_agent",
system_message="You are a research specialist. Your role is to gather information, analyze data, and provide insights on topics. You ask thoughtful questions and provide well-researched responses.",
model_client=model_client,
)

user_proxy_initiator = UserProxyAgent("user_initiator")
creative_agent = AssistantAgent(
name="creative_agent",
system_message="You are a creative strategist. Your role is to brainstorm innovative solutions, think outside the box, and propose creative approaches to problems. You build on others' ideas and suggest novel perspectives.",
model_client=model_client,
)

analyst_agent = AssistantAgent(
name="analyst_agent",
system_message="You are a critical analyst. Your role is to evaluate ideas, identify strengths and weaknesses, and provide constructive feedback. You help refine concepts and ensure practical feasibility.",
model_client=model_client,
)


async def main():
termination = MaxMessageTermination(max_messages=2)
# Set up a longer conversation to allow for meaningful AI-to-AI interaction
termination = MaxMessageTermination(max_messages=8)

group_chat = RoundRobinGroupChat(
[user_proxy_initiator, assistant], # Corrected: agents as positional argument
[research_agent, creative_agent, analyst_agent], # AI agents working together
termination_condition=termination,
)

chat_task = "How can I help you today?"
print(f"User Initiator: {chat_task}")
# A task that will engage all three agents in meaningful collaboration
chat_task = "Let's develop a comprehensive strategy for reducing plastic waste in urban environments. I need research on current methods, creative solutions, and analysis of feasibility."
print(f"🎯 Task: {chat_task}")
print("\n" + "=" * 80)
print("🤖 AI Agents Collaboration Starting...")
print("=" * 80)

try:
stream = group_chat.run_stream(task=chat_task)
await Console().run(stream)
await Console(stream=stream)
agentops.end_trace(tracer, end_state="Success")

except Exception as e:
Expand Down Expand Up @@ -112,4 +129,4 @@ async def main():

# You can view data on this run at [app.agentops.ai](app.agentops.ai).
#
# The dashboard will display LLM events for each message sent by each agent, including those made by the human user.
# The dashboard will display LLM events for each message sent by each agent, showing the full AI-to-AI collaboration process with research, creative, and analytical perspectives.
3 changes: 2 additions & 1 deletion examples/autogen/requirements.txt
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
pyautogen
autogen-agentchat==0.6.1
autogen-ext[openai]
Loading
Loading