Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions examples/mcp/anthropic-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Example: Airbnb apartment-search agent backed by Anthropic Claude.
# Demonstrates wiring an external MCP tool server (run via npx) into a
# praisonaiagents Agent so the LLM can call airbnb_search at runtime.
from praisonaiagents import Agent, MCP

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # LiteLLM-style provider-prefixed model id (Anthropic Claude 3.7 Sonnet).
    llm="anthropic/claude-3-7-sonnet-20250219",
    # Launches the Airbnb MCP server as a subprocess; its tools are exposed
    # to the agent. --ignore-robots-txt is a flag of that server.
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

# Kick off a single task; the prompt nudges the model to use the
# airbnb_search tool rather than answering from memory.
search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")
9 changes: 9 additions & 0 deletions examples/mcp/gemini-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Example: Airbnb apartment-search agent backed by Google Gemini.
# Same pattern as the other provider examples in examples/mcp/: an Agent
# plus an MCP tool server launched via npx.
from praisonaiagents import Agent, MCP

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # LiteLLM-style provider-prefixed model id (Gemini 2.5 Pro experimental).
    llm="gemini/gemini-2.5-pro-exp-03-25",
    # Spawns the Airbnb MCP server; its tools become callable by the agent.
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

# Single-shot task; the prompt instructs the model to call airbnb_search.
search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")
9 changes: 9 additions & 0 deletions examples/mcp/groq-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Example: Airbnb apartment-search agent backed by Groq.
# Same pattern as the sibling examples in examples/mcp/: an Agent wired to
# an external MCP tool server started via npx.
from praisonaiagents import Agent, MCP

# Command line that launches the Airbnb MCP server as a subprocess.
AIRBNB_MCP_COMMAND = "npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt"

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # LiteLLM-style provider-prefixed model id served by Groq.
    llm="groq/llama-3.3-70b-versatile",
    tools=MCP(AIRBNB_MCP_COMMAND),
)

# Single-shot task; the prompt instructs the model to call airbnb_search.
search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")
9 changes: 9 additions & 0 deletions examples/mcp/mistral-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Example: Airbnb apartment-search agent backed by Mistral.
# Same pattern as the other provider examples in examples/mcp/.
from praisonaiagents import Agent, MCP

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # LiteLLM-style provider-prefixed model alias (tracks Mistral's latest
    # "large" model rather than pinning a dated snapshot).
    llm="mistral/mistral-large-latest",
    # Spawns the Airbnb MCP server; its tools become callable by the agent.
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

# Single-shot task; the prompt instructs the model to call airbnb_search.
search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")
9 changes: 9 additions & 0 deletions examples/mcp/openai-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Example: Airbnb apartment-search agent backed by OpenAI.
# Same pattern as the other provider examples in examples/mcp/.
from praisonaiagents import Agent, MCP

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # Bare model id (no provider prefix) — OpenAI is presumably the default
    # provider here; the other examples use "provider/model" form.
    llm="gpt-4o-mini",
    # Spawns the Airbnb MCP server; its tools become callable by the agent.
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

# Single-shot booking request; unlike the sibling examples this prompt does
# not explicitly force the airbnb_search tool.
search_agent.start("I want to book an apartment in Paris for 2 nights. 03/28 - 03/30 for 2 adults")
9 changes: 9 additions & 0 deletions src/praisonai-agents/groq-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Airbnb apartment-search agent backed by Groq, via an external MCP server.
# The agent spawns the @openbnb/mcp-server-airbnb process (npx) and exposes
# its tools (e.g. airbnb_search) to the LLM.
from praisonaiagents import Agent, MCP

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # Fixed: was "groq/llama-3.2-90b-vision-preview", a preview model Groq
    # has deprecated; aligned with examples/mcp/groq-mcp.py, which uses the
    # current production model.
    llm="groq/llama-3.3-70b-versatile",
    # --ignore-robots-txt is a flag of the MCP server, not of npx.
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

# Single-shot task; the prompt instructs the model to call airbnb_search.
search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")
9 changes: 9 additions & 0 deletions src/praisonai-agents/openai-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Airbnb apartment-search agent backed by OpenAI, via an external MCP server.
# Mirrors examples/mcp/openai-mcp.py but uses the provider-prefixed model id.
from praisonaiagents import Agent, MCP

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    # LiteLLM-style provider-prefixed model id; examples/mcp/openai-mcp.py
    # uses the bare "gpt-4o-mini" form for the same model.
    llm="openai/gpt-4o-mini",
    # Spawns the Airbnb MCP server; its tools become callable by the agent.
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

# Single-shot booking request handled by the agent.
search_agent.start("I want to book an apartment in Paris for 2 nights. 03/28 - 03/30 for 2 adults")
89 changes: 60 additions & 29 deletions src/praisonai-agents/praisonaiagents/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,15 +436,37 @@ def get_response(

# Handle tool calls
if tool_calls and execute_tool_fn:
# Convert tool_calls to a serializable format for all providers
serializable_tool_calls = []
for tc in tool_calls:
if isinstance(tc, dict):
serializable_tool_calls.append(tc) # Already a dict
else:
# Convert object to dict
serializable_tool_calls.append({
"id": tc.id,
"type": getattr(tc, 'type', "function"),
"function": {
"name": tc.function.name,
"arguments": tc.function.arguments
}
})
messages.append({
"role": "assistant",
"content": response_text,
"tool_calls": tool_calls
"tool_calls": serializable_tool_calls
})

for tool_call in tool_calls:
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
# Handle both object and dict access patterns
if isinstance(tool_call, dict):
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
tool_call_id = tool_call["id"]
else:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
tool_call_id = tool_call.id

logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
tool_result = execute_tool_fn(function_name, arguments)
Expand All @@ -462,18 +484,11 @@ def get_response(
logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
display_tool_call(display_message, console=console)

messages.append({
"role": "tool",
"tool_call_id": tool_call["id"],
"content": json.dumps(tool_result)
})
else:
logging.debug("[TOOL_EXEC_DEBUG] Verbose mode off, not displaying tool call")
messages.append({
"role": "tool",
"tool_call_id": tool_call["id"],
"content": "Function returned an empty output"
})
messages.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
})

# If reasoning_steps is True, do a single non-streaming call
if reasoning_steps:
Expand Down Expand Up @@ -930,15 +945,37 @@ async def get_response_async(
tool_calls = tool_response.choices[0].message.get("tool_calls")

if tool_calls:
# Convert tool_calls to a serializable format for all providers
serializable_tool_calls = []
for tc in tool_calls:
if isinstance(tc, dict):
serializable_tool_calls.append(tc) # Already a dict
else:
# Convert object to dict
serializable_tool_calls.append({
"id": tc.id,
"type": getattr(tc, 'type', "function"),
"function": {
"name": tc.function.name,
"arguments": tc.function.arguments
}
})
messages.append({
"role": "assistant",
"content": response_text,
"tool_calls": tool_calls
"tool_calls": serializable_tool_calls
})

for tool_call in tool_calls:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
# Handle both object and dict access patterns
if isinstance(tool_call, dict):
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
tool_call_id = tool_call["id"]
else:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
tool_call_id = tool_call.id

tool_result = await execute_tool_fn(function_name, arguments)

Expand All @@ -949,17 +986,11 @@ async def get_response_async(
else:
display_message += "Function returned no output"
display_tool_call(display_message, console=console)
messages.append({
"role": "tool",
"tool_call_id": tool_call.id,
"content": json.dumps(tool_result)
})
else:
messages.append({
"role": "tool",
"tool_call_id": tool_call.id,
"content": "Function returned an empty output"
})
messages.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
})

# Get response after tool calls
response_text = ""
Expand Down
2 changes: 1 addition & 1 deletion src/praisonai-agents/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "praisonaiagents"
version = "0.0.68"
version = "0.0.69"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
authors = [
{ name="Mervin Praison" }
Expand Down
Loading
Loading