Skip to content

Commit c2527a1

Browse files
committed
Update LLM and MCP Integration
- Changed the LLM model from "groq/llama-3.3-70b-versatile" to "groq/llama-3.2-90b-vision-preview" in `groq-mcp.py` for improved performance.
- Added a new example script `ollama-python.py` for stock price retrieval using the Ollama model.
- Incremented version number to 0.0.70 in `pyproject.toml` to reflect recent changes.
- Updated Python version requirement in `uv.lock` to support Python 3.11 and above for better compatibility.
- Enhanced the LLM class to handle tool calls specifically for Ollama models, ensuring proper processing of tool results.
1 parent 748f22f commit c2527a1

File tree

5 files changed

+156
-506
lines changed

5 files changed

+156
-506
lines changed

examples/mcp/groq-mcp.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
search_agent = Agent(
44
instructions="""You help book apartments on Airbnb.""",
5-
llm="groq/llama-3.3-70b-versatile",
5+
llm="groq/llama-3.2-90b-vision-preview",
66
tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
77
)
88

examples/mcp/ollama-python.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from praisonaiagents import Agent, MCP
2+
3+
search_agent = Agent(
4+
instructions="""You help book apartments on Airbnb.""",
5+
llm="ollama/llama3.2",
6+
tools=MCP("/Users/praison/miniconda3/envs/mcp/bin/python /Users/praison/stockprice/app.py")
7+
)
8+
9+
search_agent.start("What is the Stock Price of Apple?")

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 142 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -460,9 +460,28 @@ def get_response(
460460
for tool_call in tool_calls:
461461
# Handle both object and dict access patterns
462462
if isinstance(tool_call, dict):
463-
function_name = tool_call["function"]["name"]
464-
arguments = json.loads(tool_call["function"]["arguments"])
465-
tool_call_id = tool_call["id"]
463+
# Special handling for Ollama provider which may have a different structure
464+
if self.model and self.model.startswith("ollama/"):
465+
try:
466+
# Try standard format first
467+
if "function" in tool_call and isinstance(tool_call["function"], dict):
468+
function_name = tool_call["function"]["name"]
469+
arguments = json.loads(tool_call["function"]["arguments"])
470+
else:
471+
# Try alternative format that Ollama might return
472+
function_name = tool_call.get("name", "unknown_function")
473+
arguments = json.loads(tool_call.get("arguments", "{}"))
474+
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
475+
except Exception as e:
476+
logging.error(f"Error processing Ollama tool call: {e}")
477+
function_name = "unknown_function"
478+
arguments = {}
479+
tool_call_id = f"tool_{id(tool_call)}"
480+
else:
481+
# Standard format for other providers
482+
function_name = tool_call["function"]["name"]
483+
arguments = json.loads(tool_call["function"]["arguments"])
484+
tool_call_id = tool_call["id"]
466485
else:
467486
function_name = tool_call.function.name
468487
arguments = json.loads(tool_call.function.arguments)
@@ -490,8 +509,55 @@ def get_response(
490509
"content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
491510
})
492511

512+
# Special handling for Ollama models that don't automatically process tool results
513+
if self.model and self.model.startswith("ollama/") and tool_result:
514+
# For Ollama models, we need to explicitly ask the model to process the tool results
515+
# First, check if the response is just a JSON tool call
516+
try:
517+
# If the response_text is a valid JSON that looks like a tool call,
518+
# we need to make a follow-up call to process the results
519+
json_response = json.loads(response_text.strip())
520+
if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
521+
logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
522+
523+
# Create a prompt that asks the model to process the tool results
524+
follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
525+
526+
# Make a follow-up call to process the results
527+
follow_up_messages = [
528+
{"role": "user", "content": follow_up_prompt}
529+
]
530+
531+
# Get response with streaming
532+
if verbose:
533+
with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
534+
response_text = ""
535+
for chunk in litellm.completion(
536+
model=self.model,
537+
messages=follow_up_messages,
538+
temperature=temperature,
539+
stream=True
540+
):
541+
if chunk and chunk.choices and chunk.choices[0].delta.content:
542+
content = chunk.choices[0].delta.content
543+
response_text += content
544+
live.update(display_generating(response_text, start_time))
545+
else:
546+
response_text = ""
547+
for chunk in litellm.completion(
548+
model=self.model,
549+
messages=follow_up_messages,
550+
temperature=temperature,
551+
stream=True
552+
):
553+
if chunk and chunk.choices and chunk.choices[0].delta.content:
554+
response_text += chunk.choices[0].delta.content
555+
except (json.JSONDecodeError, KeyError):
556+
# Not a JSON response or not a tool call format, continue normally
557+
pass
558+
493559
# If reasoning_steps is True, do a single non-streaming call
494-
if reasoning_steps:
560+
elif reasoning_steps:
495561
resp = litellm.completion(
496562
model=self.model,
497563
messages=messages,
@@ -969,9 +1035,28 @@ async def get_response_async(
9691035
for tool_call in tool_calls:
9701036
# Handle both object and dict access patterns
9711037
if isinstance(tool_call, dict):
972-
function_name = tool_call["function"]["name"]
973-
arguments = json.loads(tool_call["function"]["arguments"])
974-
tool_call_id = tool_call["id"]
1038+
# Special handling for Ollama provider which may have a different structure
1039+
if self.model and self.model.startswith("ollama/"):
1040+
try:
1041+
# Try standard format first
1042+
if "function" in tool_call and isinstance(tool_call["function"], dict):
1043+
function_name = tool_call["function"]["name"]
1044+
arguments = json.loads(tool_call["function"]["arguments"])
1045+
else:
1046+
# Try alternative format that Ollama might return
1047+
function_name = tool_call.get("name", "unknown_function")
1048+
arguments = json.loads(tool_call.get("arguments", "{}"))
1049+
tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
1050+
except Exception as e:
1051+
logging.error(f"Error processing Ollama tool call: {e}")
1052+
function_name = "unknown_function"
1053+
arguments = {}
1054+
tool_call_id = f"tool_{id(tool_call)}"
1055+
else:
1056+
# Standard format for other providers
1057+
function_name = tool_call["function"]["name"]
1058+
arguments = json.loads(tool_call["function"]["arguments"])
1059+
tool_call_id = tool_call["id"]
9751060
else:
9761061
function_name = tool_call.function.name
9771062
arguments = json.loads(tool_call.function.arguments)
@@ -994,7 +1079,56 @@ async def get_response_async(
9941079

9951080
# Get response after tool calls
9961081
response_text = ""
997-
if reasoning_steps:
1082+
1083+
# Special handling for Ollama models that don't automatically process tool results
1084+
if self.model and self.model.startswith("ollama/") and tool_result:
1085+
# For Ollama models, we need to explicitly ask the model to process the tool results
1086+
# First, check if the response is just a JSON tool call
1087+
try:
1088+
# If the response_text is a valid JSON that looks like a tool call,
1089+
# we need to make a follow-up call to process the results
1090+
json_response = json.loads(response_text.strip())
1091+
if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
1092+
logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
1093+
1094+
# Create a prompt that asks the model to process the tool results
1095+
follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
1096+
1097+
# Make a follow-up call to process the results
1098+
follow_up_messages = [
1099+
{"role": "user", "content": follow_up_prompt}
1100+
]
1101+
1102+
# Get response with streaming
1103+
if verbose:
1104+
response_text = ""
1105+
async for chunk in await litellm.acompletion(
1106+
model=self.model,
1107+
messages=follow_up_messages,
1108+
temperature=temperature,
1109+
stream=True
1110+
):
1111+
if chunk and chunk.choices and chunk.choices[0].delta.content:
1112+
content = chunk.choices[0].delta.content
1113+
response_text += content
1114+
print("\033[K", end="\r")
1115+
print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
1116+
else:
1117+
response_text = ""
1118+
async for chunk in await litellm.acompletion(
1119+
model=self.model,
1120+
messages=follow_up_messages,
1121+
temperature=temperature,
1122+
stream=True
1123+
):
1124+
if chunk and chunk.choices and chunk.choices[0].delta.content:
1125+
response_text += chunk.choices[0].delta.content
1126+
except (json.JSONDecodeError, KeyError):
1127+
# Not a JSON response or not a tool call format, continue normally
1128+
pass
1129+
1130+
# If no special handling was needed or if it's not an Ollama model
1131+
elif reasoning_steps:
9981132
# Non-streaming call to capture reasoning
9991133
resp = await litellm.acompletion(
10001134
model=self.model,

src/praisonai-agents/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "praisonaiagents"
7-
version = "0.0.69"
7+
version = "0.0.70"
88
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
99
authors = [
1010
{ name="Mervin Praison" }

0 commit comments

Comments (0)