Skip to content

Commit 370cc3e

Browse files
Merge pull request #610 from MervinPraison/claude/issue-605-20250605_165941
fix: resolve Memory import error with litellm compatibility
2 parents 206a457 + 8c85de5 commit 370cc3e

5 files changed

Lines changed: 315 additions & 45 deletions

File tree

src/praisonai-agents/praisonaiagents/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
from .knowledge.chunking import Chunking
1313
from .mcp.mcp import MCP
1414
from .session import Session
15+
from .memory.memory import Memory
1516
from .guardrails import GuardrailResult, LLMGuardrail
1617
from .main import (
1718
TaskOutput,
@@ -43,6 +44,7 @@
4344
'ReflectionOutput',
4445
'AutoAgents',
4546
'Session',
47+
'Memory',
4648
'display_interaction',
4749
'display_self_reflection',
4850
'display_instruction',

src/praisonai-agents/praisonaiagents/memory/memory.py

Lines changed: 121 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,12 @@
2929
except ImportError:
3030
OPENAI_AVAILABLE = False
3131

32+
# Optional dependency probe: record whether LiteLLM is importable so the
# embedding helpers can prefer it and fall back to OpenAI otherwise.
try:
    import litellm
except ImportError:
    LITELLM_AVAILABLE = False
else:
    LITELLM_AVAILABLE = True
37+
3238

3339

3440

@@ -340,14 +346,28 @@ def search_short_term(
340346

341347
elif self.use_rag and hasattr(self, "chroma_col"):
342348
try:
343-
from openai import OpenAI
344-
client = OpenAI()
345-
346-
response = client.embeddings.create(
347-
input=query,
348-
model="text-embedding-3-small"
349-
)
350-
query_embedding = response.data[0].embedding
349+
if LITELLM_AVAILABLE:
350+
# Use LiteLLM for consistency with the rest of the codebase
351+
import litellm
352+
353+
response = litellm.embedding(
354+
model="text-embedding-3-small",
355+
input=query
356+
)
357+
query_embedding = response.data[0]["embedding"]
358+
elif OPENAI_AVAILABLE:
359+
# Fallback to OpenAI client
360+
from openai import OpenAI
361+
client = OpenAI()
362+
363+
response = client.embeddings.create(
364+
input=query,
365+
model="text-embedding-3-small"
366+
)
367+
query_embedding = response.data[0].embedding
368+
else:
369+
self._log_verbose("Neither litellm nor openai available for embeddings", logging.WARNING)
370+
return []
351371

352372
resp = self.chroma_col.query(
353373
query_embeddings=[query_embedding],
@@ -464,19 +484,39 @@ def store_long_term(
464484
# Store in vector database if enabled
465485
if self.use_rag and hasattr(self, "chroma_col"):
466486
try:
467-
from openai import OpenAI
468-
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # Ensure API key is correctly set
469-
470-
logger.info("Getting embeddings from OpenAI...")
471-
logger.debug(f"Embedding input text: {text}") # Log the input text
472-
473-
response = client.embeddings.create(
474-
input=text,
475-
model="text-embedding-3-small"
476-
)
477-
embedding = response.data[0].embedding
478-
logger.info("Successfully got embeddings")
479-
logger.debug(f"Received embedding of length: {len(embedding)}") # Log embedding details
487+
if LITELLM_AVAILABLE:
488+
# Use LiteLLM for consistency with the rest of the codebase
489+
import litellm
490+
491+
logger.info("Getting embeddings from LiteLLM...")
492+
logger.debug(f"Embedding input text: {text}")
493+
494+
response = litellm.embedding(
495+
model="text-embedding-3-small",
496+
input=text
497+
)
498+
embedding = response.data[0]["embedding"]
499+
logger.info("Successfully got embeddings from LiteLLM")
500+
logger.debug(f"Received embedding of length: {len(embedding)}")
501+
502+
elif OPENAI_AVAILABLE:
503+
# Fallback to OpenAI client
504+
from openai import OpenAI
505+
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
506+
507+
logger.info("Getting embeddings from OpenAI...")
508+
logger.debug(f"Embedding input text: {text}")
509+
510+
response = client.embeddings.create(
511+
input=text,
512+
model="text-embedding-3-small"
513+
)
514+
embedding = response.data[0].embedding
515+
logger.info("Successfully got embeddings from OpenAI")
516+
logger.debug(f"Received embedding of length: {len(embedding)}")
517+
else:
518+
logger.warning("Neither litellm nor openai available for embeddings")
519+
return
480520

481521
# Sanitize metadata for ChromaDB
482522
sanitized_metadata = self._sanitize_metadata(metadata)
@@ -527,15 +567,28 @@ def search_long_term(
527567

528568
elif self.use_rag and hasattr(self, "chroma_col"):
529569
try:
530-
from openai import OpenAI
531-
client = OpenAI()
532-
533-
# Get query embedding
534-
response = client.embeddings.create(
535-
input=query,
536-
model="text-embedding-3-small" # Using consistent model
537-
)
538-
query_embedding = response.data[0].embedding
570+
if LITELLM_AVAILABLE:
571+
# Use LiteLLM for consistency with the rest of the codebase
572+
import litellm
573+
574+
response = litellm.embedding(
575+
model="text-embedding-3-small",
576+
input=query
577+
)
578+
query_embedding = response.data[0]["embedding"]
579+
elif OPENAI_AVAILABLE:
580+
# Fallback to OpenAI client
581+
from openai import OpenAI
582+
client = OpenAI()
583+
584+
response = client.embeddings.create(
585+
input=query,
586+
model="text-embedding-3-small"
587+
)
588+
query_embedding = response.data[0].embedding
589+
else:
590+
self._log_verbose("Neither litellm nor openai available for embeddings", logging.WARNING)
591+
return []
539592

540593
# Search ChromaDB with embedding
541594
resp = self.chroma_col.query(
@@ -910,21 +963,44 @@ def calculate_quality_metrics(
910963
"""
911964

912965
try:
913-
# Use LiteLLM for consistency with the rest of the codebase
914-
import litellm
915-
916-
# Convert model name if it's in litellm format
917-
model_name = llm or "gpt-4o-mini"
918-
919-
response = litellm.completion(
920-
model=model_name,
921-
messages=[{
922-
"role": "user",
923-
"content": custom_prompt or default_prompt
924-
}],
925-
response_format={"type": "json_object"},
926-
temperature=0.3
927-
)
966+
if LITELLM_AVAILABLE:
967+
# Use LiteLLM for consistency with the rest of the codebase
968+
import litellm
969+
970+
# Convert model name if it's in litellm format
971+
model_name = llm or "gpt-4o-mini"
972+
973+
response = litellm.completion(
974+
model=model_name,
975+
messages=[{
976+
"role": "user",
977+
"content": custom_prompt or default_prompt
978+
}],
979+
response_format={"type": "json_object"},
980+
temperature=0.3
981+
)
982+
elif OPENAI_AVAILABLE:
983+
# Fallback to OpenAI client
984+
from openai import OpenAI
985+
client = OpenAI()
986+
987+
response = client.chat.completions.create(
988+
model=llm or "gpt-4o-mini",
989+
messages=[{
990+
"role": "user",
991+
"content": custom_prompt or default_prompt
992+
}],
993+
response_format={"type": "json_object"},
994+
temperature=0.3
995+
)
996+
else:
997+
logger.error("Neither litellm nor openai available for quality calculation")
998+
return {
999+
"completeness": 0.0,
1000+
"relevance": 0.0,
1001+
"clarity": 0.0,
1002+
"accuracy": 0.0
1003+
}
9281004

9291005
metrics = json.loads(response.choices[0].message.content)
9301006

Lines changed: 135 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,135 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Comprehensive test script to verify all import scenarios work correctly
4+
Tests the original failing import from the GitHub issue
5+
"""
6+
7+
import sys
8+
import os
9+
10+
# Add the praisonai-agents source to Python path
11+
sys.path.insert(0, '/home/runner/work/PraisonAI/PraisonAI/src/praisonai-agents')
12+
13+
def test_original_failing_import():
    """Exercise the exact import statement that failed in the GitHub issue.

    Returns True when the import succeeds, False (after printing the error
    and a traceback) when it raises.
    """
    print("=== Testing Original Failing Import ===")
    try:
        from praisonaiagents.agents.agents import Agent, Task, PraisonAIAgents
        print('✅ SUCCESS: from praisonaiagents.agents.agents import Agent, Task, PraisonAIAgents')
        return True
    except Exception as exc:
        import traceback
        # Import failures get the plain ERROR label; anything else is tagged
        # as unexpected — output matches the original two-handler form.
        label = '❌ ERROR' if isinstance(exc, ImportError) else '❌ UNEXPECTED ERROR'
        print(f'{label}: {exc}')
        traceback.print_exc()
        return False
30+
31+
def test_memory_direct_import():
    """Verify that Memory can be imported from its own subpackage.

    Returns True on success, False (after printing the error and a
    traceback) on failure.
    """
    print("\n=== Testing Direct Memory Import ===")
    try:
        from praisonaiagents.memory import Memory
        print('✅ SUCCESS: from praisonaiagents.memory import Memory')
        return True
    except Exception as exc:
        import traceback
        # Same messages as the original split ImportError/Exception handlers.
        label = '❌ ERROR' if isinstance(exc, ImportError) else '❌ UNEXPECTED ERROR'
        print(f'{label}: {exc}')
        traceback.print_exc()
        return False
48+
49+
def test_memory_from_package_root():
    """Verify that Memory is re-exported from the package root.

    Returns True on success, False (after printing the error and a
    traceback) on failure.
    """
    print("\n=== Testing Memory Import from Package Root ===")
    try:
        from praisonaiagents import Memory
        print('✅ SUCCESS: from praisonaiagents import Memory')
        return True
    except Exception as exc:
        import traceback
        # Same messages as the original split ImportError/Exception handlers.
        label = '❌ ERROR' if isinstance(exc, ImportError) else '❌ UNEXPECTED ERROR'
        print(f'{label}: {exc}')
        traceback.print_exc()
        return False
66+
67+
def test_session_import():
    """Verify that Session (which depends on Memory) imports cleanly.

    Returns True on success, False (after printing the error and a
    traceback) on failure.
    """
    print("\n=== Testing Session Import ===")
    try:
        from praisonaiagents.session import Session
        print('✅ SUCCESS: from praisonaiagents.session import Session')
        return True
    except Exception as exc:
        import traceback
        # Same messages as the original split ImportError/Exception handlers.
        label = '❌ ERROR' if isinstance(exc, ImportError) else '❌ UNEXPECTED ERROR'
        print(f'{label}: {exc}')
        traceback.print_exc()
        return False
84+
85+
def test_memory_instantiation():
    """Test that Memory can be instantiated and basic operations work.

    Returns:
        bool: True when a Memory built with the dependency-free "none"
        provider can be created and its short-term store/search calls
        complete without raising; False otherwise.
    """
    print("\n=== Testing Memory Instantiation ===")
    try:
        from praisonaiagents.memory import Memory

        # Minimal config: the "none" provider avoids vector DBs / API keys.
        config = {"provider": "none"}
        memory = Memory(config=config)
        print('✅ SUCCESS: Memory instance created with provider="none"')

        # Smoke-test the basic API. Only "does not raise" is checked here;
        # the search result content is deliberately not inspected (the
        # original bound it to an unused local, which is dropped here).
        memory.store_short_term("test content", metadata={"test": True})
        memory.search_short_term("test", limit=1)
        print('✅ SUCCESS: Basic memory operations work')

        return True
    except Exception as e:
        print(f'❌ ERROR: {e}')
        import traceback
        traceback.print_exc()
        return False
107+
108+
def run_all_tests():
    """Run every import/instantiation test and print a summary.

    Returns:
        bool: True only when every individual test passed.
    """
    print("🔍 Running comprehensive import tests...")

    tests = [
        test_original_failing_import,
        test_memory_direct_import,
        test_memory_from_package_root,
        test_session_import,
        test_memory_instantiation,
    ]

    # Run every test eagerly (no short-circuit) so each one prints its own
    # diagnostics even when an earlier test fails.
    results = [test() for test in tests]

    print(f"\n📊 Test Results: {sum(results)}/{len(results)} tests passed")

    if all(results):
        print("🎉 ALL TESTS PASSED! The Memory import issue has been resolved.")
    else:
        print("❌ Some tests failed. The issue may not be fully resolved.")

    return all(results)
132+
133+
if __name__ == "__main__":
    # Exit 0 on full success, 1 on any failure — suitable for CI gating.
    sys.exit(0 if run_all_tests() else 1)
Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
#!/usr/bin/env python3
"""Test script to verify the Memory import works correctly.

Attempts the `praisonaiagents.memory.Memory` import and a minimal
instantiation, printing SUCCESS/ERROR lines instead of raising so it can
be run directly to diagnose the import failure from the original issue.
"""

import sys

# Add the praisonai-agents source to Python path.
# NOTE(review): hard-coded CI runner path — assumes execution on GitHub
# Actions; harmless elsewhere (the entry is simply never found).
sys.path.insert(0, '/home/runner/work/PraisonAI/PraisonAI/src/praisonai-agents')

try:
    from praisonaiagents.memory import Memory
    print('SUCCESS: Memory import works correctly')
    print('Memory class found:', Memory)

    # Try to create a minimal instance to ensure it doesn't fail immediately
    config = {"provider": "none"}  # Use the simplest provider
    memory = Memory(config=config)
    print('SUCCESS: Memory instance created successfully')

except ImportError as e:
    print('ERROR:', e)
    import traceback
    traceback.print_exc()
except Exception as e:
    print('UNEXPECTED ERROR:', e)
    import traceback
    traceback.print_exc()

0 commit comments

Comments
 (0)