Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docker/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
flask \
"praisonai>=2.2.27" \
"praisonai>=2.2.28" \
"praisonai[api]" \
gunicorn \
markdown
Expand Down
2 changes: 1 addition & 1 deletion docker/Dockerfile.chat
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
praisonai_tools \
"praisonai>=2.2.27" \
"praisonai>=2.2.28" \
"praisonai[chat]" \
"embedchain[github,youtube]"

Expand Down
2 changes: 1 addition & 1 deletion docker/Dockerfile.dev
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
praisonai_tools \
"praisonai>=2.2.27" \
"praisonai>=2.2.28" \
"praisonai[ui]" \
"praisonai[chat]" \
"praisonai[realtime]" \
Expand Down
2 changes: 1 addition & 1 deletion docker/Dockerfile.ui
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
praisonai_tools \
"praisonai>=2.2.27" \
"praisonai>=2.2.28" \
"praisonai[ui]" \
"praisonai[crewai]"

Expand Down
4 changes: 2 additions & 2 deletions docker/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ healthcheck:
## 📦 Package Versions

All Docker images use consistent, up-to-date versions:
- PraisonAI: `>=2.2.27`
- PraisonAI: `>=2.2.28`
- PraisonAI Agents: `>=0.0.92`
- Python: `3.11-slim`

Expand Down Expand Up @@ -218,7 +218,7 @@ docker-compose up -d
### Version Pinning
To use specific versions, update the Dockerfile:
```dockerfile
RUN pip install "praisonai==2.2.27" "praisonaiagents==0.0.92"
RUN pip install "praisonai==2.2.28" "praisonaiagents==0.0.92"
```

## 🌐 Production Deployment
Expand Down
96 changes: 96 additions & 0 deletions src/praisonai-agents/debug_comparison.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#!/usr/bin/env python3

import sys
import traceback
from praisonaiagents import Agent, MCP

def test_agent_direct():
    """Test gpt-4o-mini (agent.py path)"""
    # Banner so this run is easy to spot when both paths are compared in one log.
    separator = "=" * 50
    print(separator)
    print("TESTING: gpt-4o-mini (agent.py direct calls)")
    print(separator)

    try:
        # Bare model name (no "provider/" prefix) routes through agent.py's
        # direct-call path rather than the LiteLLM wrapper.
        agent = Agent(
            instructions="""You are a helpful assistant that can break down complex problems.
Use the available tools when relevant to perform step-by-step analysis.""",
            llm="gpt-4o-mini",
            tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking")
        )
        print("✅ Agent created successfully")
        print(f"✅ Agent LLM: {getattr(agent, 'llm', 'Not set')}")
        print(f"✅ Agent using custom LLM: {getattr(agent, '_using_custom_llm', False)}")
        result = agent.start("What are 3 steps to make coffee?")
    except Exception as exc:
        print(f"❌ Error in agent direct: {exc}")
        traceback.print_exc()
        return False, str(exc)

    print("✅ Agent execution completed successfully")
    return True, result

def test_llm_class():
    """Test openai/gpt-4o-mini (llm.py path)"""
    # Banner (with a leading blank line to separate it from the previous run).
    separator = "=" * 50
    print("\n" + separator)
    print("TESTING: openai/gpt-4o-mini (llm.py LiteLLM)")
    print(separator)

    try:
        # The "openai/" provider prefix routes through llm.py's LiteLLM path.
        agent = Agent(
            instructions="""You are a helpful assistant that can break down complex problems.
Use the available tools when relevant to perform step-by-step analysis.""",
            llm="openai/gpt-4o-mini",
            tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking")
        )
        print("✅ Agent created successfully")
        print(f"✅ Agent LLM instance: {getattr(agent, 'llm_instance', 'Not set')}")
        print(f"✅ Agent using custom LLM: {getattr(agent, '_using_custom_llm', False)}")
        result = agent.start("What are 3 steps to make coffee?")
    except Exception as exc:
        print(f"❌ Error in llm class: {exc}")
        traceback.print_exc()
        return False, str(exc)

    print("✅ Agent execution completed successfully")
    return True, result

if __name__ == "__main__":
print("🔍 DEBUGGING: Comparing both LLM approaches\n")

# Test agent direct
success1, result1 = test_agent_direct()

# Test LLM class
success2, result2 = test_llm_class()

print("\n" + "=" * 50)
print("FINAL RESULTS")
print("=" * 50)

if success1:
print("✅ gpt-4o-mini (agent.py) - SUCCESS")
else:
print("❌ gpt-4o-mini (agent.py) - FAILED")
print(f" Error: {result1}")

if success2:
print("✅ openai/gpt-4o-mini (llm.py) - SUCCESS")
else:
print("❌ openai/gpt-4o-mini (llm.py) - FAILED")
print(f" Error: {result2}")

if success1 and success2:
print("\n🎉 BOTH FORMATS WORK CORRECTLY!")
print("📝 The issue mentioned might be resolved or was a different problem.")
elif success1 and not success2:
print("\n⚠️ CONFIRMED: LLM class path has issues")
print("📝 Need to debug the LLM class implementation")
elif success2 and not success1:
print("\n⚠️ CONFIRMED: Agent direct path has issues")
print("📝 Need to debug the agent direct implementation")
else:
print("\n💥 BOTH PATHS FAILED - Something is fundamentally wrong")
Loading
Loading