Skip to content

Commit 54afacd

Browse files
authored
fix: remove default stream value for agent
1 parent: fab4aca — commit: 54afacd

2 files changed

Lines changed: 3 additions & 3 deletions

File tree

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -713,7 +713,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
713713
display_error(f"Error in chat completion: {e}")
714714
return None
715715

716-
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True):
716+
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None):
717717
# Log all parameter values when in debug mode
718718
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
719719
param_info = {
@@ -949,7 +949,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
949949

950950
logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
951951
messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
952-
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
952+
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=stream)
953953
response_text = response.choices[0].message.content.strip()
954954
reflection_count += 1
955955
continue # Continue the loop for more reflections

src/praisonai-agents/praisonaiagents/agents/agents.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@ def process_video(video_path: str, seconds_per_frame=2):
4545
return base64_frames
4646

4747
class PraisonAIAgents:
48-
def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=True):
48+
def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=None):
4949
# Add check at the start if memory is requested
5050
if memory:
5151
try:

0 commit comments

Comments (0)