Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/mcp/airbnb.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ description: "Guide for integrating Airbnb booking capabilities with PraisonAI a
icon: "airbnb"
---

# Airbnb MCP Integration
## Add Airbnb Tool to AI Agent

```mermaid
flowchart LR
Expand Down
13 changes: 13 additions & 0 deletions examples/mcp/fetch-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
"""Example: fetch and convert web content through the Fetch MCP server.

Requires: pip install mcp-server-fetch
"""
from praisonaiagents import Agent, MCP

# A single string command is passed to MCP, which launches the fetch
# server as a subprocess and exposes its tools to the agent.
# (Removed an unused `import os` — this example reads no environment vars.)
fetch_agent = Agent(
    instructions="""You are a helpful assistant that can fetch and process web content.
    Use the available tools when relevant to retrieve and convert web pages to markdown.""",
    llm="gpt-4o-mini",
    tools=MCP("python -m mcp_server_fetch")
)

fetch_agent.start("Fetch and convert the content from https://example.com to markdown")
23 changes: 23 additions & 0 deletions examples/mcp/git-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
"""Example: perform Git operations through the Git MCP server.

Requires: pip install mcp-server-git
"""
from praisonaiagents import Agent, MCP
import os

# Read Git credentials from the environment. Unset variables come back
# as None from os.getenv().
git_env = {
    "GIT_USERNAME": os.getenv("GIT_USERNAME"),
    "GIT_EMAIL": os.getenv("GIT_EMAIL"),
    "GIT_TOKEN": os.getenv("GIT_TOKEN"),  # needed for private repos
}
# Drop unset entries: subprocess environment values must be strings,
# so passing None through would fail when the server process is spawned.
git_env = {key: value for key, value in git_env.items() if value is not None}

# Use a single string command with Git configuration
git_agent = Agent(
    instructions="""You are a helpful assistant that can perform Git operations.
    Use the available tools when relevant to manage repositories, commits, and branches.""",
    llm="gpt-4o-mini",
    tools=MCP("python -m mcp_server_git", env=git_env)
)

git_agent.start("Clone and analyze the repository at https://github.com/modelcontextprotocol/servers")
9 changes: 9 additions & 0 deletions examples/mcp/ollama.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
"""Example: Airbnb apartment search driven by a local Ollama model."""
from praisonaiagents import Agent, MCP

# The Airbnb MCP server is launched via npx; --ignore-robots-txt is the
# server's own flag, forwarded as part of the command string.
airbnb_tools = MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")

search_agent = Agent(
    instructions="""You help book apartments on Airbnb.""",
    llm="ollama/llama3.2",
    tools=airbnb_tools
)

search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")
16 changes: 16 additions & 0 deletions examples/mcp/sentry-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
"""Example: analyze Sentry error reports through the Sentry MCP server.

Requires: pip install mcp-server-sentry
"""
from praisonaiagents import Agent, MCP
import os

# Get Sentry auth token from environment
sentry_token = os.getenv("SENTRY_AUTH_TOKEN")
# Fail fast with an actionable message; otherwise args=[None] would be
# forwarded to the server command and surface as an opaque CLI error later.
if not sentry_token:
    raise RuntimeError("SENTRY_AUTH_TOKEN environment variable is not set")

# Use a single string command with Sentry configuration; the token is
# supplied via args so it follows the --auth-token flag.
sentry_agent = Agent(
    instructions="""You are a helpful assistant that can analyze Sentry error reports.
    Use the available tools when relevant to inspect and debug application issues.""",
    llm="gpt-4o-mini",
    tools=MCP("python -m mcp_server_sentry --auth-token", args=[sentry_token])
)

sentry_agent.start("Analyze the most recent critical error in Sentry")
13 changes: 13 additions & 0 deletions examples/mcp/time-mcp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
"""Example: time and timezone operations through the Time MCP server.

Requires: pip install mcp-server-time
"""
from praisonaiagents import Agent, MCP

# The server's local timezone is fixed via its own CLI flag inside the
# single command string.
# (Removed an unused `import os` — this example reads no environment vars.)
time_agent = Agent(
    instructions="""You are a helpful assistant that can handle time-related operations.
    Use the available tools when relevant to manage timezone conversions and time information.""",
    llm="gpt-4o-mini",
    tools=MCP("python -m mcp_server_time --local-timezone=America/New_York")
)

time_agent.start("Get the current time in New York and convert it to UTC")
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ dependencies = [
"python-dotenv>=0.19.0",
"instructor>=1.3.3",
"PyYAML>=6.0",
"mcp==1.6.0",
]

[project.optional-dependencies]
Expand Down Expand Up @@ -106,6 +107,7 @@ praisonaiagents = ">=0.0.67"
python-dotenv = ">=0.19.0"
instructor = ">=1.3.3"
PyYAML = ">=6.0"
mcp = "==1.6.0"
pyautogen = {version = ">=0.2.19", optional = true}
crewai = {version = ">=0.32.0", optional = true}
praisonai-tools = {version = ">=0.0.7", optional = true}
Expand Down
9 changes: 9 additions & 0 deletions src/praisonai-agents/mcp-ollama.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
"""Example: Airbnb search agent on Ollama, summarizing its results."""
from praisonaiagents import Agent, MCP

# Keep the agent persona in a named constant so the Agent construction
# reads as pure wiring.
AGENT_INSTRUCTIONS = """You help book apartments on Airbnb."""

search_agent = Agent(
    instructions=AGENT_INSTRUCTIONS,
    llm="ollama/llama3.2",
    tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
)

search_agent.start("Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference. After searching Give me summary")
108 changes: 89 additions & 19 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -421,6 +421,12 @@ def __init__(
# Pass the entire string so LiteLLM can parse provider/model
self.llm_instance = LLM(model=llm)
self._using_custom_llm = True

# Ensure tools are properly accessible when using custom LLM
if tools:
logging.debug(f"Tools passed to Agent with custom LLM: {tools}")
# Store the tools for later use
self.tools = tools
except ImportError as e:
raise ImportError(
"LLM features requested but dependencies not installed. "
Expand Down Expand Up @@ -519,9 +525,20 @@ def execute_tool(self, function_name, arguments):
"""
logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")

# Special handling for MCP tools
# Check if tools is an MCP instance with the requested function name
from ..mcp.mcp import MCP
if isinstance(self.tools, MCP):
logging.debug(f"Looking for MCP tool {function_name}")
# Check if any of the MCP tools match the function name
for mcp_tool in self.tools.runner.tools:
if hasattr(mcp_tool, 'name') and mcp_tool.name == function_name:
logging.debug(f"Found matching MCP tool: {function_name}")
return self.tools.runner.call_tool(function_name, arguments)

# Try to find the function in the agent's tools list first
func = None
for tool in self.tools:
for tool in self.tools if isinstance(self.tools, (list, tuple)) else []:
if (callable(tool) and getattr(tool, '__name__', '') == function_name) or \
(inspect.isclass(tool) and tool.__name__ == function_name):
func = tool
Expand Down Expand Up @@ -643,24 +660,64 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
logging.warning(f"Tool {tool} not recognized")

try:
if stream:
# Process as streaming response with formatted tools
final_response = self._process_stream_response(
messages,
temperature,
start_time,
formatted_tools=formatted_tools if formatted_tools else None,
reasoning_steps=reasoning_steps
)
# Use the custom LLM instance if available
if self._using_custom_llm and hasattr(self, 'llm_instance'):
if stream:
# Debug logs for tool info
if formatted_tools:
logging.debug(f"Passing {len(formatted_tools)} formatted tools to LLM instance: {formatted_tools}")

# Use the LLM instance for streaming responses
final_response = self.llm_instance.get_response(
prompt=messages[1:], # Skip system message as LLM handles it separately
system_prompt=messages[0]['content'] if messages and messages[0]['role'] == 'system' else None,
temperature=temperature,
tools=formatted_tools if formatted_tools else None,
verbose=self.verbose,
markdown=self.markdown,
stream=True,
console=self.console,
execute_tool_fn=self.execute_tool,
agent_name=self.name,
agent_role=self.role,
reasoning_steps=reasoning_steps
)
else:
# Non-streaming with custom LLM
final_response = self.llm_instance.get_response(
prompt=messages[1:],
system_prompt=messages[0]['content'] if messages and messages[0]['role'] == 'system' else None,
temperature=temperature,
tools=formatted_tools if formatted_tools else None,
verbose=self.verbose,
markdown=self.markdown,
stream=False,
console=self.console,
execute_tool_fn=self.execute_tool,
agent_name=self.name,
agent_role=self.role,
reasoning_steps=reasoning_steps
)
else:
# Process as regular non-streaming response
final_response = client.chat.completions.create(
model=self.llm,
messages=messages,
temperature=temperature,
tools=formatted_tools if formatted_tools else None,
stream=False
)
# Use the standard OpenAI client approach
if stream:
# Process as streaming response with formatted tools
final_response = self._process_stream_response(
messages,
temperature,
start_time,
formatted_tools=formatted_tools if formatted_tools else None,
reasoning_steps=reasoning_steps
)
else:
# Process as regular non-streaming response
final_response = client.chat.completions.create(
model=self.llm,
messages=messages,
temperature=temperature,
tools=formatted_tools if formatted_tools else None,
stream=False
)

tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)

Expand Down Expand Up @@ -748,13 +805,26 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd

if self._using_custom_llm:
try:
# Special handling for MCP tools when using provider/model format
tool_param = self.tools if tools is None else tools

# Convert MCP tool objects to OpenAI format if needed
if tool_param is not None:
from ..mcp.mcp import MCP
if isinstance(tool_param, MCP) and hasattr(tool_param, 'to_openai_tool'):
logging.debug("Converting MCP tool to OpenAI format")
openai_tool = tool_param.to_openai_tool()
if openai_tool:
tool_param = [openai_tool]
logging.debug(f"Converted MCP tool: {tool_param}")

# Pass everything to LLM class
response_text = self.llm_instance.get_response(
prompt=prompt,
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
chat_history=self.chat_history,
temperature=temperature,
tools=self.tools if tools is None else tools,
tools=tool_param,
output_json=output_json,
output_pydantic=output_pydantic,
verbose=self.verbose,
Expand Down
14 changes: 10 additions & 4 deletions src/praisonai-agents/praisonaiagents/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,15 +289,21 @@ def get_response(
if tools:
formatted_tools = []
for tool in tools:
if callable(tool):
# Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
formatted_tools.append(tool)
elif callable(tool):
tool_def = self._generate_tool_definition(tool.__name__)
if tool_def:
formatted_tools.append(tool_def)
elif isinstance(tool, str):
tool_def = self._generate_tool_definition(tool)
if tool_def:
formatted_tools.append(tool_def)
else:
continue
logging.debug(f"Skipping tool of unsupported type: {type(tool)}")

if tool_def:
formatted_tools.append(tool_def)
if not formatted_tools:
formatted_tools = None

Expand Down
39 changes: 39 additions & 0 deletions src/praisonai-agents/praisonaiagents/mcp/mcp.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,6 +313,45 @@ def __iter__(self) -> Iterable[Callable]:
"""
return iter(self._tools)

def to_openai_tool(self):
    """Convert the MCP tool to an OpenAI-compatible tool definition.

    This method is specifically invoked by the Agent class when using
    provider/model format (e.g., "openai/gpt-4o-mini").

    Returns:
        dict: OpenAI-compatible tool definition, or None when the runner
        exposes no tools.
    """
    available = self.runner.tools
    if not available:
        logging.warning("No MCP tools available to convert to OpenAI format")
        return None

    # For simplicity, only the first tool is converted when several exist;
    # handling multiple tools would require returning a list instead.
    first = available[0]

    # Prefer the schema reported by the MCP server; fall back to a minimal
    # empty-object schema so the definition is always valid JSON Schema.
    schema = getattr(first, 'inputSchema', None) or {
        "type": "object",
        "properties": {},
        "required": []
    }

    description = (
        first.description
        if hasattr(first, 'description')
        else f"Call the {first.name} tool"
    )

    return {
        "type": "function",
        "function": {
            "name": first.name,
            "description": description,
            "parameters": schema,
        },
    }

def __del__(self):
"""Clean up resources when the object is garbage collected."""
if hasattr(self, 'runner'):
Expand Down
5 changes: 3 additions & 2 deletions src/praisonai-agents/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,16 @@ build-backend = "setuptools.build_meta"

[project]
name = "praisonaiagents"
version = "0.0.67"
version = "0.0.68"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
authors = [
{ name="Mervin Praison" }
]
dependencies = [
"pydantic",
"rich",
"openai"
"openai",
"mcp==1.6.0"
]

[project.optional-dependencies]
Expand Down
Loading
Loading