Skip to content

Commit 6e3c9b7

Browse files
committed
Refactor mock completion function in mini agents tests for improved clarity and functionality
1 parent 7d89874 commit 6e3c9b7

1 file changed

Lines changed: 75 additions & 16 deletions

File tree

src/praisonai/tests/unit/agent/test_mini_agents_sequential.py

Lines changed: 75 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,29 +13,88 @@
1313
from unittest.mock import patch
1414

1515
def mock_completion(*args, **kwargs):
    """Mock litellm.completion function to avoid calling the actual API.

    Inspects the outgoing ``messages`` to pick a canned reply:
    "42" for the generate-number prompt, "84" for the multiply prompt,
    and "mock response" otherwise. Honors ``stream=True`` by returning
    an iterable of chunk objects shaped like litellm streaming chunks
    (``chunk.choices[0].delta.content``); otherwise returns a response
    object supporting both attribute and dict-style access
    (``resp.choices[0].message.content`` / ``resp["choices"]``).
    """
    stream = kwargs.get('stream', False)

    # Determine response content based on the prompt text in messages.
    messages = kwargs.get('messages', [])
    response_content = "mock response"

    for message in messages:
        # Messages are normally dicts, but tolerate plain strings too.
        content = message.get('content', '') if isinstance(message, dict) else str(message)
        if 'Generate the number 42' in content:
            response_content = "42"
            break
        elif 'multiply it by 2' in content:
            response_content = "84"
            break

    if stream:
        # Return streaming response (iterator of chunks).
        class MockDelta:
            def __init__(self, content):
                self.content = content

        class MockStreamChoice:
            def __init__(self, content):
                self.delta = MockDelta(content)

        class MockStreamChunk:
            def __init__(self, content):
                self.choices = [MockStreamChoice(content)]

        # A list can be iterated, simulating streaming chunks.
        return [MockStreamChunk(response_content)]
    else:
        # Return non-streaming response.
        class MockMessage:
            def __init__(self, content):
                self.content = content

            def get(self, key, default=None):
                # No tool calls in our simple test scenario.
                if key == "tool_calls":
                    return None
                return getattr(self, key, default)

            def __getitem__(self, key):
                if hasattr(self, key):
                    return getattr(self, key)
                raise KeyError(key)

        class MockChoice:
            def __init__(self, content):
                self.message = MockMessage(content)

            def __getitem__(self, key):
                if key == "message":
                    return self.message
                if hasattr(self, key):
                    return getattr(self, key)
                raise KeyError(key)

        class MockResponse:
            def __init__(self, content):
                self.choices = [MockChoice(content)]

            def __getitem__(self, key):
                # Support dictionary-style access alongside attributes.
                if key == "choices":
                    return self.choices
                if hasattr(self, key):
                    return getattr(self, key)
                raise KeyError(key)

        return MockResponse(response_content)
2888

29-
@patch('praisonai.inc.models.PraisonAIModel.chat', side_effect=mock_completion)
30-
@patch('praisonai.inc.models.PraisonAIModel.stream_chat', side_effect=mock_completion)
31-
def test_mini_agents_sequential_data_passing(mock_stream, mock_chat):
89+
@patch('litellm.completion', side_effect=mock_completion)
90+
def test_mini_agents_sequential_data_passing(mock_litellm):
3291
"""Test that output from previous task is passed to next task in Mini Agents"""
3392

3493
print("Testing Mini Agents Sequential Data Passing...")
3594

3695
# Create two agents for sequential processing
37-
agent1 = Agent(instructions="Generate the number 42 as your output. Only return the number 42, nothing else.", model_name="gpt-3.5-turbo")
38-
agent2 = Agent(instructions="Take the input number and multiply it by 2. Only return the result number, nothing else.", model_name="gpt-3.5-turbo")
96+
agent1 = Agent(instructions="Generate the number 42 as your output. Only return the number 42, nothing else.", llm={'model': 'gpt-3.5-turbo'})
97+
agent2 = Agent(instructions="Take the input number and multiply it by 2. Only return the result number, nothing else.", llm={'model': 'gpt-3.5-turbo'})
3998

4099
# Create agents with sequential processing (Mini Agents pattern)
41100
agents = Agents(agents=[agent1, agent2], verbose=True)

0 commit comments

Comments
 (0)