Skip to content

Commit 206a457

Browse files
Merge pull request #609 from MervinPraison/develop
Develop
2 parents 8923c72 + ee85a63 commit 206a457

17 files changed

Lines changed: 90 additions & 34 deletions

File tree

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,4 +74,5 @@ agents/praisonaiagents/praisonaiagents.egg-info
 .praison
 # Local Netlify folder
 .netlify
-.qodo
+.qodo
+CopilotKit*

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     flask \
-    "praisonai>=2.2.28" \
+    "praisonai>=2.2.29" \
     "praisonai[api]" \
     gunicorn \
     markdown

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.28" \
+    "praisonai>=2.2.29" \
     "praisonai[chat]" \
     "embedchain[github,youtube]"
 

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.28" \
+    "praisonai>=2.2.29" \
    "praisonai[ui]" \
    "praisonai[chat]" \
    "praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.28" \
+    "praisonai>=2.2.29" \
     "praisonai[ui]" \
     "praisonai[crewai]"
 

docker/README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ healthcheck:
 ## 📦 Package Versions
 
 All Docker images use consistent, up-to-date versions:
-- PraisonAI: `>=2.2.28`
+- PraisonAI: `>=2.2.29`
 - PraisonAI Agents: `>=0.0.92`
 - Python: `3.11-slim`
 

@@ -218,7 +218,7 @@ docker-compose up -d
 ### Version Pinning
 To use specific versions, update the Dockerfile:
 ```dockerfile
-RUN pip install "praisonai==2.2.28" "praisonaiagents==0.0.92"
+RUN pip install "praisonai==2.2.29" "praisonaiagents==0.0.92"
 ```
 
 ## 🌐 Production Deployment
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
+from praisonaiagents import Agent
+
+def validate_content(data):
+    if len(str(data)) < 50:
+        return False, "Content too short"
+    return True, data
+
+agent = Agent(
+    instructions="You are a writer",
+    guardrail=validate_content,
+    max_guardrail_retries=1
+)
+
+agent.start("Write a welcome message with 5 words")

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -619,12 +619,9 @@ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, to
         while retry_count <= self.max_guardrail_retries:
             # Create TaskOutput object
             task_output = TaskOutput(
+                description="Agent response output",
                 raw=current_response,
-                output=current_response,
-                pydantic=None,
-                json_dict=None,
-                name=f"{self.name}_output",
-                description="Agent response output"
+                agent=self.name
             )
 
             # Process guardrail

src/praisonai-agents/praisonaiagents/memory/memory.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -910,11 +910,14 @@ def calculate_quality_metrics(
         """
 
         try:
-            # Use OpenAI client from main.py
-            from ..main import client
+            # Use LiteLLM for consistency with the rest of the codebase
+            import litellm
 
-            response = client.chat.completions.create(
-                model=llm or "gpt-4o",
+            # Convert model name if it's in litellm format
+            model_name = llm or "gpt-4o-mini"
+
+            response = litellm.completion(
+                model=model_name,
                 messages=[{
                     "role": "user",
                     "content": custom_prompt or default_prompt

src/praisonai-agents/praisonaiagents/task/task.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,11 @@ async def execute_callback(self, task_output: TaskOutput) -> None:
         if self.agent:
             if getattr(self.agent, '_using_custom_llm', False) and hasattr(self.agent, 'llm_instance'):
                 # For custom LLM instances (like Ollama)
-                llm_model = self.agent.llm_instance
+                # Extract the model name from the LLM instance
+                if hasattr(self.agent.llm_instance, 'model'):
+                    llm_model = self.agent.llm_instance.model
+                else:
+                    llm_model = "gpt-4o-mini"  # Default fallback
             elif hasattr(self.agent, 'llm') and self.agent.llm:
                 # For standard model strings
                 llm_model = self.agent.llm

0 commit comments

Comments
 (0)