diff --git a/src/praisonai-agents/praisonaiagents/agent/__init__.py b/src/praisonai-agents/praisonaiagents/agent/__init__.py index 85dd2006f..6d532db6c 100644 --- a/src/praisonai-agents/praisonaiagents/agent/__init__.py +++ b/src/praisonai-agents/praisonaiagents/agent/__init__.py @@ -174,6 +174,18 @@ def __getattr__(name): from .session_manager import SessionManagerMixin _lazy_cache[name] = SessionManagerMixin return SessionManagerMixin + elif name == 'ChatMixin': + from .chat_mixin import ChatMixin + _lazy_cache[name] = ChatMixin + return ChatMixin + elif name == 'ExecutionMixin': + from .execution_mixin import ExecutionMixin + _lazy_cache[name] = ExecutionMixin + return ExecutionMixin + elif name == 'MemoryMixin': + from .memory_mixin import MemoryMixin + _lazy_cache[name] = MemoryMixin + return MemoryMixin raise AttributeError(f"module {__name__!r} has no attribute {name!r}") @@ -239,4 +251,7 @@ def __getattr__(name): 'ToolExecutionMixin', 'ChatHandlerMixin', 'SessionManagerMixin', + 'ChatMixin', + 'ExecutionMixin', + 'MemoryMixin', ] \ No newline at end of file diff --git a/src/praisonai-agents/praisonaiagents/agent/chat_mixin.py b/src/praisonai-agents/praisonaiagents/agent/chat_mixin.py index 2aab5c7bb..facab40fc 100644 --- a/src/praisonai-agents/praisonaiagents/agent/chat_mixin.py +++ b/src/praisonai-agents/praisonaiagents/agent/chat_mixin.py @@ -1426,7 +1426,7 @@ def clean_json_output(self, output: str) -> str: cleaned = cleaned[:-3].strip() return cleaned - async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None, attachments=None): + async def achat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None, output_json: Optional[Any] = None, output_pydantic: Optional[Any] = None, reasoning_steps: bool = False, stream: Optional[bool] = None, task_name: Optional[str] = None, task_description: Optional[str] = None, 
task_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, force_retrieval: bool = False, skip_retrieval: bool = False, attachments: Optional[List[str]] = None, tool_choice: Optional[str] = None): """Async version of chat method with self-reflection support. Args: @@ -1440,12 +1440,22 @@ async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None _trace_emitter.agent_start(self.name, {"role": self.role, "goal": self.goal}) try: - return await self._achat_impl(prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, task_name, task_description, task_id, attachments, _trace_emitter) + return await self._achat_impl( + prompt=prompt, temperature=temperature, tools=tools, + output_json=output_json, output_pydantic=output_pydantic, + reasoning_steps=reasoning_steps, stream=stream, + task_name=task_name, task_description=task_description, task_id=task_id, + config=config, force_retrieval=force_retrieval, skip_retrieval=skip_retrieval, + attachments=attachments, _trace_emitter=_trace_emitter, tool_choice=tool_choice + ) finally: _trace_emitter.agent_end(self.name) - async def _achat_impl(self, prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, task_name, task_description, task_id, attachments, _trace_emitter): + async def _achat_impl(self, prompt, temperature, tools, output_json, output_pydantic, reasoning_steps, stream, task_name, task_description, task_id, config, force_retrieval, skip_retrieval, attachments, _trace_emitter, tool_choice=None): """Internal async chat implementation (extracted for trace wrapping).""" + # Use agent's stream setting if not explicitly provided + if stream is None: + stream = self.stream # Process ephemeral attachments (DRY - builds multimodal prompt) # IMPORTANT: Original text 'prompt' is stored in history, attachments are NOT llm_prompt = self._build_multimodal_prompt(prompt, attachments) if attachments else prompt @@ -1506,7 +1516,7 @@ async def _achat_impl(self, 
prompt, temperature, tools, output_json, output_pyda if self._knowledge_sources and not self._knowledge_processed: self._ensure_knowledge_processed() - if self.knowledge: + if not skip_retrieval and self.knowledge: search_results = self.knowledge.search(prompt, agent_id=self.agent_id) if search_results: if isinstance(search_results, dict) and 'results' in search_results: @@ -1580,7 +1590,8 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda task_description=task_description, task_id=task_id, execute_tool_fn=self.execute_tool_async, - reasoning_steps=reasoning_steps + reasoning_steps=reasoning_steps, + stream=stream ) self.chat_history.append({"role": "assistant", "content": response_text}) @@ -1686,12 +1697,18 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda # Make the API call based on the type of request if tools: - response = await self._openai_client.async_client.chat.completions.create( + effective_tool_choice = tool_choice or getattr(self, '_yaml_tool_choice', None) + tool_call_kwargs = dict( model=self.llm, messages=messages, temperature=temperature, tools=formatted_tools, ) + if effective_tool_choice: + tool_call_kwargs['tool_choice'] = effective_tool_choice + response = await self._openai_client.async_client.chat.completions.create( + **tool_call_kwargs + ) result = await self._achat_completion(response, tools) if get_logger().getEffectiveLevel() == logging.DEBUG: total_time = time.time() - start_time @@ -1920,7 +1937,7 @@ async def _achat_completion(self, response, tools, reasoning_steps=False): model=self.llm, messages=messages, temperature=1.0, - stream=True + stream=self.stream ) full_response_text = "" reasoning_content = "" diff --git a/src/praisonai-agents/praisonaiagents/agent/memory_mixin.py b/src/praisonai-agents/praisonaiagents/agent/memory_mixin.py index 76f62532a..238e9e58a 100644 --- a/src/praisonai-agents/praisonaiagents/agent/memory_mixin.py +++
b/src/praisonai-agents/praisonaiagents/agent/memory_mixin.py @@ -7,12 +7,8 @@ """ import os -import re -import time -import json import logging from praisonaiagents._logging import get_logger -import threading # Fallback helpers to avoid circular imports def _get_console():