Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,18 @@ def __getattr__(name):
from .session_manager import SessionManagerMixin
_lazy_cache[name] = SessionManagerMixin
return SessionManagerMixin
elif name == 'ChatMixin':
from .chat_mixin import ChatMixin
_lazy_cache[name] = ChatMixin
return ChatMixin
elif name == 'ExecutionMixin':
from .execution_mixin import ExecutionMixin
_lazy_cache[name] = ExecutionMixin
return ExecutionMixin
elif name == 'MemoryMixin':
from .memory_mixin import MemoryMixin
_lazy_cache[name] = MemoryMixin
return MemoryMixin

raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Expand Down Expand Up @@ -239,4 +251,7 @@ def __getattr__(name):
'ToolExecutionMixin',
'ChatHandlerMixin',
'SessionManagerMixin',
'ChatMixin',
'ExecutionMixin',
'MemoryMixin',
]
31 changes: 24 additions & 7 deletions src/praisonai-agents/praisonaiagents/agent/chat_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -1426,7 +1426,7 @@ def clean_json_output(self, output: str) -> str:
cleaned = cleaned[:-3].strip()
return cleaned

async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None, attachments=None):
async def achat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None, output_json: Optional[Any] = None, output_pydantic: Optional[Any] = None, reasoning_steps: bool = False, stream: Optional[bool] = None, task_name: Optional[str] = None, task_description: Optional[str] = None, task_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, force_retrieval: bool = False, skip_retrieval: bool = False, attachments: Optional[List[str]] = None, tool_choice: Optional[str] = None):
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

The signature of achat has been aligned with chat, but the implementation is currently incomplete. The new parameters (stream, config, force_retrieval, skip_retrieval, tool_choice) are not passed to the internal _achat_impl call (line 1443), nor is _achat_impl (line 1447) updated to accept or handle them. This makes these parameters non-functional in the async path. Additionally, adding the return type hint -> Optional[str] would improve consistency with the sync chat method.

Suggested change
async def achat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None, output_json: Optional[Any] = None, output_pydantic: Optional[Any] = None, reasoning_steps: bool = False, stream: Optional[bool] = None, task_name: Optional[str] = None, task_description: Optional[str] = None, task_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, force_retrieval: bool = False, skip_retrieval: bool = False, attachments: Optional[List[str]] = None, tool_choice: Optional[str] = None):
async def achat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None, output_json: Optional[Any] = None, output_pydantic: Optional[Any] = None, reasoning_steps: bool = False, stream: Optional[bool] = None, task_name: Optional[str] = None, task_description: Optional[str] = None, task_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, force_retrieval: bool = False, skip_retrieval: bool = False, attachments: Optional[List[str]] = None, tool_choice: Optional[str] = None) -> Optional[str]:

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

1. achat() positional args shifted — requirement gap (correctness)

The achat() signature inserts new parameters (e.g., stream) before existing ones like
task_name, which breaks callers that pass arguments positionally. This violates the requirement
for zero breaking API changes and consistent chat method signatures when accessed via Agent.
Agent Prompt
## Issue description
`achat()` added new parameters (e.g., `stream`, `config`, retrieval flags) before previously-existing parameters (`task_name`, `task_description`, `task_id`, `attachments`). This is a breaking change for any callers using positional arguments beyond `reasoning_steps`.

## Issue Context
Compliance requires zero breaking API changes and stable public method signatures. Even though internal call sites use keyword arguments, external users may still be passing these arguments positionally.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/chat_mixin.py[1429-1429]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools

"""Async version of chat method with self-reflection support.

Args:
Expand All @@ -1440,12 +1440,22 @@ async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None
_trace_emitter.agent_start(self.name, {"role": self.role, "goal": self.goal})

try:
return await self._achat_impl(prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, task_name, task_description, task_id, attachments, _trace_emitter)
return await self._achat_impl(
prompt=prompt, temperature=temperature, tools=tools,
output_json=output_json, output_pydantic=output_pydantic,
reasoning_steps=reasoning_steps, stream=stream,
task_name=task_name, task_description=task_description, task_id=task_id,
config=config, force_retrieval=force_retrieval, skip_retrieval=skip_retrieval,
attachments=attachments, _trace_emitter=_trace_emitter, tool_choice=tool_choice
)
finally:
_trace_emitter.agent_end(self.name)

async def _achat_impl(self, prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, task_name, task_description, task_id, attachments, _trace_emitter):
async def _achat_impl(self, prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, stream, task_name, task_description, task_id, config, force_retrieval, skip_retrieval, attachments, _trace_emitter, tool_choice=None):
"""Internal async chat implementation (extracted for trace wrapping)."""
# Use agent's stream setting if not explicitly provided
if stream is None:
stream = self.stream
# Process ephemeral attachments (DRY - builds multimodal prompt)
# IMPORTANT: Original text 'prompt' is stored in history, attachments are NOT
llm_prompt = self._build_multimodal_prompt(prompt, attachments) if attachments else prompt
Expand Down Expand Up @@ -1506,7 +1516,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
if self._knowledge_sources and not self._knowledge_processed:
self._ensure_knowledge_processed()

if self.knowledge:
if not skip_retrieval and self.knowledge:
search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
if search_results:
if isinstance(search_results, dict) and 'results' in search_results:
Expand Down Expand Up @@ -1580,7 +1590,8 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
task_description=task_description,
task_id=task_id,
execute_tool_fn=self.execute_tool_async,
reasoning_steps=reasoning_steps
reasoning_steps=reasoning_steps,
stream=stream
)

self.chat_history.append({"role": "assistant", "content": response_text})
Expand Down Expand Up @@ -1686,12 +1697,18 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda

# Make the API call based on the type of request
if tools:
response = await self._openai_client.async_client.chat.completions.create(
effective_tool_choice = tool_choice or getattr(self, '_yaml_tool_choice', None)
tool_call_kwargs = dict(
model=self.llm,
messages=messages,
temperature=temperature,
tools=formatted_tools,
)
if effective_tool_choice:
tool_call_kwargs['tool_choice'] = effective_tool_choice
response = await self._openai_client.async_client.chat.completions.create(
**tool_call_kwargs
)
result = await self._achat_completion(response, tools)
if get_logger().getEffectiveLevel() == logging.DEBUG:
total_time = time.time() - start_time
Expand Down Expand Up @@ -1920,7 +1937,7 @@ async def _achat_completion(self, response, tools, reasoning_steps=False):
model=self.llm,
messages=messages,
temperature=1.0,
stream=True
stream=stream
)
full_response_text = ""
reasoning_content = ""
Expand Down
4 changes: 0 additions & 4 deletions src/praisonai-agents/praisonaiagents/agent/memory_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,8 @@
"""

import os
import re
import time
import json
import logging
from praisonaiagents._logging import get_logger
import threading

# Fallback helpers to avoid circular imports
def _get_console():
Expand Down