Skip to content

Commit 6fd84f2

Browse files
committed
Update versioning and dependencies in project files
- Incremented version of PraisonAI from 0.0.99 to 0.0.101 in pyproject.toml and uv.lock. - Added 'litellm' dependency to memory requirements for enhanced functionality. - Updated .gitignore to include 'CopilotKit*' for better file management. - Optimised TaskOutput instantiation in agent.py for clarity. - Refined memory handling in memory.py to utilise LiteLLM for consistency. - Improved model extraction logic in task.py for better fallback handling.
1 parent ec18d4a commit 6fd84f2

8 files changed

Lines changed: 72 additions & 16 deletions

File tree

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,4 +74,5 @@ agents/praisonaiagents/praisonaiagents.egg-info
7474
.praison
7575
# Local Netlify folder
7676
.netlify
77-
.qodo
77+
.qodo
78+
CopilotKit*
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
from praisonaiagents import Agent


def validate_content(data):
    """Guardrail callback: accept the output only if it is long enough.

    Returns a (passed, payload) tuple — ``(True, data)`` when the
    stringified output has at least 50 characters, otherwise
    ``(False, "Content too short")`` so the agent retries.
    """
    text = str(data)
    if len(text) >= 50:
        return True, data
    return False, "Content too short"


# Agent wired with the guardrail above; at most one retry on failure.
agent = Agent(
    instructions="You are a writer",
    guardrail=validate_content,
    max_guardrail_retries=1
)

# Deliberately short prompt — likely to trip the 50-character guardrail.
agent.start("Write a welcome message with 5 words")

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -619,12 +619,9 @@ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, to
619619
while retry_count <= self.max_guardrail_retries:
620620
# Create TaskOutput object
621621
task_output = TaskOutput(
622+
description="Agent response output",
622623
raw=current_response,
623-
output=current_response,
624-
pydantic=None,
625-
json_dict=None,
626-
name=f"{self.name}_output",
627-
description="Agent response output"
624+
agent=self.name
628625
)
629626

630627
# Process guardrail

src/praisonai-agents/praisonaiagents/memory/memory.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -910,11 +910,14 @@ def calculate_quality_metrics(
910910
"""
911911

912912
try:
913-
# Use OpenAI client from main.py
914-
from ..main import client
913+
# Use LiteLLM for consistency with the rest of the codebase
914+
import litellm
915915

916-
response = client.chat.completions.create(
917-
model=llm or "gpt-4o",
916+
# Convert model name if it's in litellm format
917+
model_name = llm or "gpt-4o-mini"
918+
919+
response = litellm.completion(
920+
model=model_name,
918921
messages=[{
919922
"role": "user",
920923
"content": custom_prompt or default_prompt

src/praisonai-agents/praisonaiagents/task/task.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,11 @@ async def execute_callback(self, task_output: TaskOutput) -> None:
308308
if self.agent:
309309
if getattr(self.agent, '_using_custom_llm', False) and hasattr(self.agent, 'llm_instance'):
310310
# For custom LLM instances (like Ollama)
311-
llm_model = self.agent.llm_instance
311+
# Extract the model name from the LLM instance
312+
if hasattr(self.agent.llm_instance, 'model'):
313+
llm_model = self.agent.llm_instance.model
314+
else:
315+
llm_model = "gpt-4o-mini" # Default fallback
312316
elif hasattr(self.agent, 'llm') and self.agent.llm:
313317
# For standard model strings
314318
llm_model = self.agent.llm

src/praisonai-agents/pyproject.toml

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "praisonaiagents"
7-
version = "0.0.99"
7+
version = "0.0.101"
88
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
99
requires-python = ">=3.10"
1010
authors = [
@@ -25,7 +25,8 @@ mcp = [
2525
]
2626

2727
memory = [
28-
"chromadb>=1.0.0"
28+
"chromadb>=1.0.0",
29+
"litellm>=1.50.0",
2930
]
3031

3132
knowledge = [
@@ -63,5 +64,6 @@ all = [
6364
"praisonaiagents[api]"
6465
]
6566

66-
[tool.setuptools]
67-
packages = ["praisonaiagents"]
67+
[tool.setuptools.packages.find]
68+
where = ["."]
69+
include = ["praisonaiagents*"]

src/praisonai-agents/test.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
from praisonaiagents import Agent, Task, PraisonAIAgents
import os
from dotenv import load_dotenv

# Pull OPENAI_API_KEY (and friends) from a local .env file.
load_dotenv()

# NOTE(review): llm_config is built but never passed to the Agent below
# (the agent uses the plain string "gpt-4o-mini") — confirm whether it
# should be wired in or removed.
llm_config = {
    "model": "openai/gpt-4o-mini",
    "api_key": os.getenv('OPENAI_API_KEY'),
    "temperature": 0.7,
    "max_tokens": 2000
}

# Single writer agent driven by a standard model string.
blog_agent = Agent(
    role="Blog Writer",
    goal="Write a blog post about AI",
    backstory="Expert at writing blog posts",
    llm="gpt-4o-mini",
)

# One short writing task assigned to the agent above.
blog_task = Task(
    description="Write a blog post about AI trends in 1 paragraph",
    expected_output="Well-written blog post about AI trends",
    agent=blog_agent
)

# Orchestrator with memory enabled (exercises the memory extras).
agents = PraisonAIAgents(
    agents=[blog_agent],
    tasks=[blog_task],
    memory=True
)

result = agents.start()

src/praisonai-agents/uv.lock

Lines changed: 3 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)