Skip to content

Commit f3c1516

Browse files
Merge pull request #396 from MervinPraison/develop
v2.0.77
2 parents 38987f3 + b1d8dbb commit f3c1516

11 files changed

Lines changed: 413 additions & 19 deletions

File tree

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
FROM python:3.11-slim
22
WORKDIR /app
33
COPY . .
4-
RUN pip install flask praisonai==2.0.76 gunicorn markdown
4+
RUN pip install flask praisonai==2.0.77 gunicorn markdown
55
EXPOSE 8080
66
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]

docs/api/praisonai/deploy.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ <h2 id="raises">Raises</h2>
110110
file.write(&#34;FROM python:3.11-slim\n&#34;)
111111
file.write(&#34;WORKDIR /app\n&#34;)
112112
file.write(&#34;COPY . .\n&#34;)
113-
file.write(&#34;RUN pip install flask praisonai==2.0.76 gunicorn markdown\n&#34;)
113+
file.write(&#34;RUN pip install flask praisonai==2.0.77 gunicorn markdown\n&#34;)
114114
file.write(&#34;EXPOSE 8080\n&#34;)
115115
file.write(&#39;CMD [&#34;gunicorn&#34;, &#34;-b&#34;, &#34;0.0.0.0:8080&#34;, &#34;api:app&#34;]\n&#39;)
116116

praisonai.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ class Praisonai < Formula
33

44
desc "AI tools for various AI applications"
55
homepage "https://github.com/MervinPraison/PraisonAI"
6-
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.76.tar.gz"
6+
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.77.tar.gz"
77
sha256 "1828fb9227d10f991522c3f24f061943a254b667196b40b1a3e4a54a8d30ce32" # Replace with actual SHA256 checksum
88
license "MIT"
99

praisonai/deploy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def create_dockerfile(self):
5656
file.write("FROM python:3.11-slim\n")
5757
file.write("WORKDIR /app\n")
5858
file.write("COPY . .\n")
59-
file.write("RUN pip install flask praisonai==2.0.76 gunicorn markdown\n")
59+
file.write("RUN pip install flask praisonai==2.0.77 gunicorn markdown\n")
6060
file.write("EXPOSE 8080\n")
6161
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
6262

pyproject.toml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "PraisonAI"
3-
version = "2.0.76"
3+
version = "2.0.77"
44
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
55
readme = "README.md"
66
license = ""
@@ -12,7 +12,7 @@ dependencies = [
1212
"rich>=13.7",
1313
"markdown>=3.5",
1414
"pyparsing>=3.0.0",
15-
"praisonaiagents>=0.0.61",
15+
"praisonaiagents>=0.0.62",
1616
"python-dotenv>=0.19.0",
1717
"instructor>=1.3.3",
1818
"PyYAML>=6.0",
@@ -84,7 +84,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]
8484

8585
[tool.poetry]
8686
name = "PraisonAI"
87-
version = "2.0.76"
87+
version = "2.0.77"
8888
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
8989
authors = ["Mervin Praison"]
9090
license = ""
@@ -102,7 +102,7 @@ python = ">=3.10,<3.13"
102102
rich = ">=13.7"
103103
markdown = ">=3.5"
104104
pyparsing = ">=3.0.0"
105-
praisonaiagents = ">=0.0.61"
105+
praisonaiagents = ">=0.0.62"
106106
python-dotenv = ">=0.19.0"
107107
instructor = ">=1.3.3"
108108
PyYAML = ">=6.0"
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
from praisonaiagents import Agent
2+
from praisonaiagents.tools import wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language
3+
4+
agent1 = Agent(
5+
instructions="You are a Wikipedia Agent",
6+
tools=[wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language],
7+
llm="openai/gpt-4o-mini",
8+
verbose=10
9+
)
10+
agent1.start("history of AI in 1 line")
11+
12+
agent2 = Agent(
13+
instructions="You are a Wikipedia Agent",
14+
tools=[wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language],
15+
llm="gpt-4o-mini",
16+
verbose=10
17+
)
18+
agent2.start("history of AI in 1 line")

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 69 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -714,6 +714,22 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
714714
return None
715715

716716
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
717+
# Log all parameter values when in debug mode
718+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
719+
param_info = {
720+
"prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
721+
"temperature": temperature,
722+
"tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
723+
"output_json": str(output_json.__class__.__name__) if output_json else None,
724+
"output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
725+
"reasoning_steps": reasoning_steps,
726+
"agent_name": self.name,
727+
"agent_role": self.role,
728+
"agent_goal": self.goal
729+
}
730+
logging.debug(f"Agent.chat parameters: {json.dumps(param_info, indent=2, default=str)}")
731+
732+
start_time = time.time()
717733
reasoning_steps = reasoning_steps or self.reasoning_steps
718734
# Search for existing knowledge if any knowledge is provided
719735
if self.knowledge:
@@ -738,7 +754,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
738754
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
739755
chat_history=self.chat_history,
740756
temperature=temperature,
741-
tools=tools,
757+
tools=self.tools if tools is None else tools,
742758
output_json=output_json,
743759
output_pydantic=output_pydantic,
744760
verbose=self.verbose,
@@ -749,14 +765,19 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
749765
console=self.console,
750766
agent_name=self.name,
751767
agent_role=self.role,
752-
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
768+
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
753769
execute_tool_fn=self.execute_tool, # Pass tool execution function
754770
reasoning_steps=reasoning_steps
755771
)
756772

757773
self.chat_history.append({"role": "user", "content": prompt})
758774
self.chat_history.append({"role": "assistant", "content": response_text})
759775

776+
# Log completion time if in debug mode
777+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
778+
total_time = time.time() - start_time
779+
logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
780+
760781
return response_text
761782
except Exception as e:
762783
display_error(f"Error in LLM chat: {e}")
@@ -944,6 +965,13 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
944965
display_error(f"Error in chat: {e}", console=self.console)
945966
return None
946967

968+
# Log completion time if in debug mode
969+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
970+
total_time = time.time() - start_time
971+
logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
972+
973+
return response_text
974+
947975
def clean_json_output(self, output: str) -> str:
948976
"""Clean and extract JSON from response text."""
949977
cleaned = output.strip()
@@ -958,6 +986,22 @@ def clean_json_output(self, output: str) -> str:
958986

959987
async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
960988
"""Async version of chat method. TODO: Requires Syncing with chat method."""
989+
# Log all parameter values when in debug mode
990+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
991+
param_info = {
992+
"prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
993+
"temperature": temperature,
994+
"tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
995+
"output_json": str(output_json.__class__.__name__) if output_json else None,
996+
"output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
997+
"reasoning_steps": reasoning_steps,
998+
"agent_name": self.name,
999+
"agent_role": self.role,
1000+
"agent_goal": self.goal
1001+
}
1002+
logging.debug(f"Agent.achat parameters: {json.dumps(param_info, indent=2, default=str)}")
1003+
1004+
start_time = time.time()
9611005
reasoning_steps = reasoning_steps or self.reasoning_steps
9621006
try:
9631007
# Search for existing knowledge if any knowledge is provided
@@ -996,9 +1040,15 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
9961040
self.chat_history.append({"role": "user", "content": prompt})
9971041
self.chat_history.append({"role": "assistant", "content": response_text})
9981042

1043+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1044+
total_time = time.time() - start_time
1045+
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
9991046
return response_text
10001047
except Exception as e:
10011048
display_error(f"Error in LLM chat: {e}")
1049+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1050+
total_time = time.time() - start_time
1051+
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
10021052
return None
10031053

10041054
# For OpenAI client
@@ -1081,7 +1131,11 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
10811131
temperature=temperature,
10821132
tools=formatted_tools
10831133
)
1084-
return await self._achat_completion(response, tools)
1134+
result = await self._achat_completion(response, tools)
1135+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1136+
total_time = time.time() - start_time
1137+
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
1138+
return result
10851139
elif output_json or output_pydantic:
10861140
response = await async_client.chat.completions.create(
10871141
model=self.llm,
@@ -1090,19 +1144,31 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
10901144
response_format={"type": "json_object"}
10911145
)
10921146
# Return the raw response
1147+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1148+
total_time = time.time() - start_time
1149+
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
10931150
return response.choices[0].message.content
10941151
else:
10951152
response = await async_client.chat.completions.create(
10961153
model=self.llm,
10971154
messages=messages,
10981155
temperature=temperature
10991156
)
1157+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1158+
total_time = time.time() - start_time
1159+
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
11001160
return response.choices[0].message.content
11011161
except Exception as e:
11021162
display_error(f"Error in chat completion: {e}")
1163+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1164+
total_time = time.time() - start_time
1165+
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
11031166
return None
11041167
except Exception as e:
11051168
display_error(f"Error in achat: {e}")
1169+
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
1170+
total_time = time.time() - start_time
1171+
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
11061172
return None
11071173

11081174
async def _achat_completion(self, response, tools, reasoning_steps=False):

Comments (0)