Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
FROM python:3.11-slim
WORKDIR /app
COPY . .
RUN pip install flask praisonai==2.0.76 gunicorn markdown
RUN pip install flask praisonai==2.0.77 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]
2 changes: 1 addition & 1 deletion docs/api/praisonai/deploy.html
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ <h2 id="raises">Raises</h2>
file.write(&#34;FROM python:3.11-slim\n&#34;)
file.write(&#34;WORKDIR /app\n&#34;)
file.write(&#34;COPY . .\n&#34;)
file.write(&#34;RUN pip install flask praisonai==2.0.76 gunicorn markdown\n&#34;)
file.write(&#34;RUN pip install flask praisonai==2.0.77 gunicorn markdown\n&#34;)
file.write(&#34;EXPOSE 8080\n&#34;)
file.write(&#39;CMD [&#34;gunicorn&#34;, &#34;-b&#34;, &#34;0.0.0.0:8080&#34;, &#34;api:app&#34;]\n&#39;)

Expand Down
2 changes: 1 addition & 1 deletion praisonai.rb
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ class Praisonai < Formula

desc "AI tools for various AI applications"
homepage "https://github.com/MervinPraison/PraisonAI"
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.76.tar.gz"
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.77.tar.gz"
sha256 "1828fb9227d10f991522c3f24f061943a254b667196b40b1a3e4a54a8d30ce32" # NOTE(review): url was bumped to the 2.0.77 tarball but this checksum was not regenerated — recompute the SHA256 for the new archive
license "MIT"

Expand Down
2 changes: 1 addition & 1 deletion praisonai/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def create_dockerfile(self):
file.write("FROM python:3.11-slim\n")
file.write("WORKDIR /app\n")
file.write("COPY . .\n")
file.write("RUN pip install flask praisonai==2.0.76 gunicorn markdown\n")
file.write("RUN pip install flask praisonai==2.0.77 gunicorn markdown\n")
file.write("EXPOSE 8080\n")
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')

Expand Down
8 changes: 4 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "PraisonAI"
version = "2.0.76"
version = "2.0.77"
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
readme = "README.md"
license = ""
Expand All @@ -12,7 +12,7 @@ dependencies = [
"rich>=13.7",
"markdown>=3.5",
"pyparsing>=3.0.0",
"praisonaiagents>=0.0.61",
"praisonaiagents>=0.0.62",
"python-dotenv>=0.19.0",
"instructor>=1.3.3",
"PyYAML>=6.0",
Expand Down Expand Up @@ -84,7 +84,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]

[tool.poetry]
name = "PraisonAI"
version = "2.0.76"
version = "2.0.77"
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
authors = ["Mervin Praison"]
license = ""
Expand All @@ -102,7 +102,7 @@ python = ">=3.10,<3.13"
rich = ">=13.7"
markdown = ">=3.5"
pyparsing = ">=3.0.0"
praisonaiagents = ">=0.0.61"
praisonaiagents = ">=0.0.62"
python-dotenv = ">=0.19.0"
instructor = ">=1.3.3"
PyYAML = ">=6.0"
Expand Down
18 changes: 18 additions & 0 deletions src/praisonai-agents/llm-tool-call.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from praisonaiagents import Agent
from praisonaiagents.tools import wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language

# Demo: run the same Wikipedia-tool agent twice, once with a provider-prefixed
# model id and once with the bare model id, to exercise both LLM routing paths.
WIKI_TOOLS = [wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language]

for model_id in ("openai/gpt-4o-mini", "gpt-4o-mini"):
    wiki_agent = Agent(
        instructions="You are a Wikipedia Agent",
        tools=WIKI_TOOLS,
        llm=model_id,
        verbose=10
    )
    wiki_agent.start("history of AI in 1 line")
72 changes: 69 additions & 3 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -714,6 +714,22 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
return None

def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
# Log all parameter values when in debug mode
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
param_info = {
"prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
"temperature": temperature,
"tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
"output_json": str(output_json.__class__.__name__) if output_json else None,
"output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
"reasoning_steps": reasoning_steps,
"agent_name": self.name,
"agent_role": self.role,
"agent_goal": self.goal
}
logging.debug(f"Agent.chat parameters: {json.dumps(param_info, indent=2, default=str)}")

start_time = time.time()
reasoning_steps = reasoning_steps or self.reasoning_steps
# Search for existing knowledge if any knowledge is provided
if self.knowledge:
Expand All @@ -738,7 +754,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
chat_history=self.chat_history,
temperature=temperature,
tools=tools,
tools=self.tools if tools is None else tools,
output_json=output_json,
output_pydantic=output_pydantic,
verbose=self.verbose,
Expand All @@ -749,14 +765,19 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
console=self.console,
agent_name=self.name,
agent_role=self.role,
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
execute_tool_fn=self.execute_tool, # Pass tool execution function
reasoning_steps=reasoning_steps
)

self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})

# Log completion time if in debug mode
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")

return response_text
except Exception as e:
display_error(f"Error in LLM chat: {e}")
Expand Down Expand Up @@ -944,6 +965,13 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
display_error(f"Error in chat: {e}", console=self.console)
return None

# Log completion time if in debug mode
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")

return response_text

def clean_json_output(self, output: str) -> str:
"""Clean and extract JSON from response text."""
cleaned = output.strip()
Expand All @@ -958,6 +986,22 @@ def clean_json_output(self, output: str) -> str:

async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
"""Async version of chat method. TODO: Requires Syncing with chat method."""
# Log all parameter values when in debug mode
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
param_info = {
"prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
"temperature": temperature,
"tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
"output_json": str(output_json.__class__.__name__) if output_json else None,
"output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
"reasoning_steps": reasoning_steps,
"agent_name": self.name,
"agent_role": self.role,
"agent_goal": self.goal
}
logging.debug(f"Agent.achat parameters: {json.dumps(param_info, indent=2, default=str)}")

start_time = time.time()
reasoning_steps = reasoning_steps or self.reasoning_steps
try:
# Search for existing knowledge if any knowledge is provided
Expand Down Expand Up @@ -996,9 +1040,15 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})

if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
return response_text
except Exception as e:
display_error(f"Error in LLM chat: {e}")
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
return None

# For OpenAI client
Expand Down Expand Up @@ -1081,7 +1131,11 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
temperature=temperature,
tools=formatted_tools
)
return await self._achat_completion(response, tools)
result = await self._achat_completion(response, tools)
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
return result
elif output_json or output_pydantic:
response = await async_client.chat.completions.create(
model=self.llm,
Expand All @@ -1090,19 +1144,31 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
response_format={"type": "json_object"}
)
# Return the raw response
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
return response.choices[0].message.content
else:
response = await async_client.chat.completions.create(
model=self.llm,
messages=messages,
temperature=temperature
)
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
return response.choices[0].message.content
except Exception as e:
display_error(f"Error in chat completion: {e}")
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
return None
except Exception as e:
display_error(f"Error in achat: {e}")
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
return None

async def _achat_completion(self, response, tools, reasoning_steps=False):
Expand Down
Loading
Loading