Skip to content

Commit 567243a

Browse files
committed
fix: keep default stream=True while supporting stream=False option
1 parent 54afacd commit 567243a

2 files changed

Lines changed: 2 additions & 2 deletions

File tree

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -713,7 +713,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
 713 713            display_error(f"Error in chat completion: {e}")
 714 714            return None
 715 715
 716     -      def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None):
     716 +      def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True):
 717 717        # Log all parameter values when in debug mode
 718 718        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
 719 719            param_info = {

src/praisonai-agents/praisonaiagents/agents/agents.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ def process_video(video_path: str, seconds_per_frame=2):
 45 45        return base64_frames
 46 46
 47 47    class PraisonAIAgents:
 48    -      def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=None):
    48 +      def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=True):
 49 49        # Add check at the start if memory is requested
 50 50        if memory:
 51 51            try:

0 commit comments

Comments (0)