Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion examples/mcp/groq-mcp.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

search_agent = Agent(
instructions="""You help book apartments on Airbnb.""",
llm="groq/llama-3.3-70b-versatile",
llm="groq/llama-3.2-90b-vision-preview",
tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

Expand Down
9 changes: 9 additions & 0 deletions examples/mcp/ollama-python.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
from praisonaiagents import Agent, MCP

search_agent = Agent(
instructions="""You help book apartments on Airbnb.""",
llm="ollama/llama3.2",
tools=MCP("/Users/praison/miniconda3/envs/mcp/bin/python /Users/praison/stockprice/app.py")
)

search_agent.start("What is the Stock Price of Apple?")
150 changes: 142 additions & 8 deletions src/praisonai-agents/praisonaiagents/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -460,9 +460,28 @@ def get_response(
for tool_call in tool_calls:
# Handle both object and dict access patterns
if isinstance(tool_call, dict):
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
tool_call_id = tool_call["id"]
# Special handling for Ollama provider which may have a different structure
if self.model and self.model.startswith("ollama/"):
try:
# Try standard format first
if "function" in tool_call and isinstance(tool_call["function"], dict):
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
else:
# Try alternative format that Ollama might return
function_name = tool_call.get("name", "unknown_function")
arguments = json.loads(tool_call.get("arguments", "{}"))
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
Comment on lines +471 to +474
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Consider adding a more descriptive name for the unknown_function fallback. Also, the tool_call_id generation could be more robust, perhaps including a hash of the function name and arguments to ensure uniqueness. Is there a risk of collisions with f"tool_{id(tool_call)}"?

Suggested change
# Try alternative format that Ollama might return
function_name = tool_call.get("name", "unknown_function")
arguments = json.loads(tool_call.get("arguments", "{}"))
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
function_name = tool_call.get("name", "default_function") # Provide a more descriptive fallback name
arguments = json.loads(tool_call.get("arguments", "{}"))
tool_call_id = tool_call.get("id", f"tool_{hashlib.md5((function_name + str(arguments)).encode()).hexdigest()}") # Include a hash for uniqueness

except Exception as e:
logging.error(f"Error processing Ollama tool call: {e}")
function_name = "unknown_function"
arguments = {}
tool_call_id = f"tool_{id(tool_call)}"
Comment on lines +475 to +479
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Catching a broad Exception here might mask unexpected errors. It would be better to catch specific exceptions like KeyError or json.JSONDecodeError that you anticipate. Also, consider logging the original exception for debugging purposes.

Suggested change
except Exception as e:
logging.error(f"Error processing Ollama tool call: {e}")
function_name = "unknown_function"
arguments = {}
tool_call_id = f"tool_{id(tool_call)}"
except (KeyError, json.JSONDecodeError) as e:
logging.error(f"Error processing Ollama tool call: {e}")
function_name = "unknown_function"
arguments = {}
tool_call_id = f"tool_{id(tool_call)}"

else:
# Standard format for other providers
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
tool_call_id = tool_call["id"]
else:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
Expand Down Expand Up @@ -490,8 +509,55 @@ def get_response(
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
})

# Special handling for Ollama models that don't automatically process tool results
if self.model and self.model.startswith("ollama/") and tool_result:
# For Ollama models, we need to explicitly ask the model to process the tool results
# First, check if the response is just a JSON tool call
try:
# If the response_text is a valid JSON that looks like a tool call,
# we need to make a follow-up call to process the results
json_response = json.loads(response_text.strip())
if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")

# Create a prompt that asks the model to process the tool results
follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
Comment on lines +519 to +524
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

This follow-up prompt is hardcoded and specific to apartment searches. Consider making this more generic or configurable to handle different tool results and desired summaries. Also, the logic to detect if a follow up is needed could be improved to be more robust.

                                json_response = json.loads(response_text.strip())
                                if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
                                    logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
                                    # Create a prompt that asks the model to process the tool results
                                    follow_up_prompt = f"Please analyze the following results and provide a summary:\n\n{json.dumps(tool_result, indent=2)}"
                                    # Create a prompt that asks the model to process the tool results
                                    # follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\n Please format your response as a nice summary with the top options."


# Make a follow-up call to process the results
follow_up_messages = [
{"role": "user", "content": follow_up_prompt}
]

# Get response with streaming
if verbose:
with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
response_text = ""
for chunk in litellm.completion(
model=self.model,
messages=follow_up_messages,
temperature=temperature,
stream=True
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
response_text += content
live.update(display_generating(response_text, start_time))
else:
response_text = ""
for chunk in litellm.completion(
model=self.model,
messages=follow_up_messages,
temperature=temperature,
stream=True
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
response_text += chunk.choices[0].delta.content
except (json.JSONDecodeError, KeyError):
# Not a JSON response or not a tool call format, continue normally
pass

# If reasoning_steps is True, do a single non-streaming call
if reasoning_steps:
elif reasoning_steps:
resp = litellm.completion(
model=self.model,
messages=messages,
Expand Down Expand Up @@ -969,9 +1035,28 @@ async def get_response_async(
for tool_call in tool_calls:
# Handle both object and dict access patterns
if isinstance(tool_call, dict):
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
tool_call_id = tool_call["id"]
# Special handling for Ollama provider which may have a different structure
if self.model and self.model.startswith("ollama/"):
try:
# Try standard format first
if "function" in tool_call and isinstance(tool_call["function"], dict):
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
else:
# Try alternative format that Ollama might return
function_name = tool_call.get("name", "unknown_function")
arguments = json.loads(tool_call.get("arguments", "{}"))
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
except Exception as e:
logging.error(f"Error processing Ollama tool call: {e}")
function_name = "unknown_function"
arguments = {}
tool_call_id = f"tool_{id(tool_call)}"
else:
# Standard format for other providers
function_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
tool_call_id = tool_call["id"]
else:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
Expand All @@ -994,7 +1079,56 @@ async def get_response_async(

# Get response after tool calls
response_text = ""
if reasoning_steps:

# Special handling for Ollama models that don't automatically process tool results
if self.model and self.model.startswith("ollama/") and tool_result:
# For Ollama models, we need to explicitly ask the model to process the tool results
# First, check if the response is just a JSON tool call
try:
# If the response_text is a valid JSON that looks like a tool call,
# we need to make a follow-up call to process the results
json_response = json.loads(response_text.strip())
if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
Comment on lines +1090 to +1092
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

This follow-up prompt is hardcoded and specific to apartment searches. Consider making this more generic or configurable to handle different tool results and desired summaries. Also, the logic to detect if a follow up is needed could be improved to be more robust.

                                json_response = json.loads(response_text.strip())
                                if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
                                    logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
                                    # Create a prompt that asks the model to process the tool results
                                    follow_up_prompt = f"Please analyze the following results and provide a summary:\n\n{json.dumps(tool_result, indent=2)}"
                                    # Create a prompt that asks the model to process the tool results
                                    # follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\n Please format your response as a nice summary with the top options."


# Create a prompt that asks the model to process the tool results
follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."

# Make a follow-up call to process the results
follow_up_messages = [
{"role": "user", "content": follow_up_prompt}
]

# Get response with streaming
if verbose:
response_text = ""
async for chunk in await litellm.acompletion(
model=self.model,
messages=follow_up_messages,
temperature=temperature,
stream=True
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
response_text += content
print("\033[K", end="\r")
print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
else:
response_text = ""
async for chunk in await litellm.acompletion(
model=self.model,
messages=follow_up_messages,
temperature=temperature,
stream=True
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
response_text += chunk.choices[0].delta.content
except (json.JSONDecodeError, KeyError):
# Not a JSON response or not a tool call format, continue normally
pass

# If no special handling was needed or if it's not an Ollama model
elif reasoning_steps:
# Non-streaming call to capture reasoning
resp = await litellm.acompletion(
model=self.model,
Expand Down
2 changes: 1 addition & 1 deletion src/praisonai-agents/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "praisonaiagents"
version = "0.0.69"
version = "0.0.70"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
authors = [
{ name="Mervin Praison" }
Expand Down
Loading
Loading