Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@
from .tool_execution import ToolExecutionMixin
from .chat_handler import ChatHandlerMixin
from .session_manager import SessionManagerMixin
from .chat_mixin import ChatMixin
from .execution_mixin import ExecutionMixin
from .memory_mixin import MemoryMixin

# Module-level logger for thread safety errors and debugging
logger = get_logger(__name__)
Expand Down Expand Up @@ -196,7 +199,7 @@ def __init__(self, agent_name: str, total_cost: float, max_budget: float):
f"${total_cost:.4f} >= ${max_budget:.4f}"
)

class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin):
class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ChatMixin, ExecutionMixin, MemoryMixin):
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new base-class order includes both ChatHandlerMixin and ChatMixin, each defining chat/achat (and related helpers). With ChatHandlerMixin placed before ChatMixin in the MRO, any future move/removal of Agent.chat/achat could unexpectedly route calls through ChatHandlerMixin instead of ChatMixin. Consider removing one of the overlapping mixins or reordering bases so the intended implementation mixin takes precedence (and avoid having multiple mixins define the same public methods).

Suggested change
class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ChatMixin, ExecutionMixin, MemoryMixin):
class Agent(ToolExecutionMixin, ChatMixin, ChatHandlerMixin, SessionManagerMixin, ExecutionMixin, MemoryMixin):

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

4. agent.py exceeds 5000 lines 📎 Requirement gap ⚙ Maintainability

praisonaiagents/agent/agent.py remains well above the 5000-line limit after this change. This
violates the required size-reduction target for the decomposition work.
Agent Prompt
## Issue description
`agent.py` is still >5000 lines, so the decomposition is incomplete per the required target.

## Issue Context
Further extraction is needed (moving method bodies into mixins and leaving only thin delegations/structure in `Agent`) until `agent.py` is ≤5000 lines.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/agent.py[5001-8919]
- src/praisonai-agents/praisonaiagents/agent/chat_mixin.py[1-99]
- src/praisonai-agents/praisonaiagents/agent/execution_mixin.py[1-142]
- src/praisonai-agents/praisonaiagents/agent/memory_mixin.py[1-118]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools

Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new base-class order includes both ChatHandlerMixin and ChatMixin, each defining chat/achat (and related helpers). With ChatHandlerMixin placed before ChatMixin in the MRO, any future move/removal of Agent.chat/achat could unexpectedly route calls through ChatHandlerMixin instead of ChatMixin. Consider removing one of the overlapping mixins or reordering bases so the intended implementation mixin takes precedence (and avoid having multiple mixins define the same public methods).

Suggested change
class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ChatMixin, ExecutionMixin, MemoryMixin):
class Agent(ToolExecutionMixin, ChatMixin, ChatHandlerMixin, SessionManagerMixin, ExecutionMixin, MemoryMixin):

Copilot uses AI. Check for mistakes.
# Class-level counter for generating unique display names for nameless agents
_agent_counter = 0
_agent_counter_lock = threading.Lock()
Expand Down
99 changes: 99 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/chat_mixin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
"""
Chat and LLM functionality mixin for Agent class.

This module contains all chat/LLM-related methods extracted from the Agent class
for better organization and maintainability.
"""

import os
import time
import json
import logging
import asyncio
from typing import List, Optional, Any, Dict, Union, Literal, Generator, Callable
from praisonaiagents._logging import get_logger

logger = get_logger(__name__)


class ChatMixin:
"""
Mixin class containing all chat and LLM-related functionality.

This mixin handles:
- chat() and achat() methods
- LLM completion processing
- Stream handling
- Tool call processing
- Response formatting
"""

def chat(self, prompt: str, temperature: float = 1.0, tools: Optional[List[Any]] = None,
         output_json: Optional[Any] = None, output_pydantic: Optional[Any] = None,
         reasoning_steps: bool = False, stream: Optional[bool] = None,
         task_name: Optional[str] = None, task_description: Optional[str] = None,
         task_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None,
         force_retrieval: bool = False, skip_retrieval: bool = False,
         attachments: Optional[List[str]] = None, tool_choice: Optional[str] = None) -> Optional[str]:
    """Send *prompt* to the agent and return its reply.

    NOTE: this is currently a placeholder — the concrete chat logic still
    lives in ``agent.py`` and has not yet been extracted into this mixin.

    Args:
        prompt: Text query that WILL be stored in chat_history.
        attachments: Optional list of image/file paths that are ephemeral
            (used for THIS turn only, NEVER stored in history).
            Supports: file paths, URLs, or data URIs.
        tool_choice: Optional tool choice mode ('auto', 'required', 'none').
            'required' forces the LLM to call a tool before responding.
        ...other args...

    Raises:
        NotImplementedError: always, until the extraction is completed.
    """
    # Stub kept so the mixin's intended public surface already exists.
    raise NotImplementedError("chat() method needs to be moved from agent.py")
Comment on lines +50 to +52
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

1. ChatMixin.chat() raises NotImplementedError 📎 Requirement gap ⚙ Maintainability

ChatMixin defines required chat/LLM methods as placeholders that raise NotImplementedError
rather than containing the extracted implementation. This does not satisfy the required
decomposition and leaves the chat extraction incomplete.
Agent Prompt
## Issue description
`ChatMixin` contains placeholder implementations (raising `NotImplementedError`) instead of the real chat/LLM + streaming logic, so the required extraction is not complete.

## Issue Context
The compliance requirement expects the actual `Agent` chat/LLM implementations (and helpers) to live in `praisonaiagents/agent/chat_mixin.py` while remaining callable on `Agent` via mixin inheritance (same signatures/behavior).

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/chat_mixin.py[31-99]
- src/praisonai-agents/praisonaiagents/agent/agent.py[5624-5665]
- src/praisonai-agents/praisonaiagents/agent/agent.py[6258-6840]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools


async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None,
output_pydantic=None, reasoning_steps=False, task_name=None,
task_description=None, task_id=None, attachments=None):
Comment on lines +54 to +56
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The signature for achat is inconsistent with the chat method. It's missing several parameters (stream, config, force_retrieval, skip_retrieval, tool_choice), type hints for existing parameters, and a return type hint. To maintain consistency between the sync and async versions of the chat functionality, I suggest updating the achat signature to match chat.

Comment on lines +54 to +56
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

achat signature is not API-parity with chat and risks async behavior drift.

Line 54 narrows async parameters versus Line 31 (stream, config, retrieval flags, tool_choice, etc.). If this becomes the active method, async callers lose options and can break.

Suggested parity patch
-    async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None, 
-                    output_pydantic=None, reasoning_steps=False, task_name=None, 
-                    task_description=None, task_id=None, attachments=None):
+    async def achat(
+        self,
+        prompt: str,
+        temperature: float = 1.0,
+        tools: Optional[List[Any]] = None,
+        output_json: Optional[Any] = None,
+        output_pydantic: Optional[Any] = None,
+        reasoning_steps: bool = False,
+        stream: Optional[bool] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
+        config: Optional[Dict[str, Any]] = None,
+        force_retrieval: bool = False,
+        skip_retrieval: bool = False,
+        attachments: Optional[List[str]] = None,
+        tool_choice: Optional[str] = None,
+    ) -> Optional[str]:

Based on learnings, “Public API changes require a deprecation cycle: emit DeprecationWarning for one release before breaking change.”

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/chat_mixin.py` around lines 54 -
56, The async method achat has a narrower parameter list than chat which breaks
API parity and risks async behavior drift; update achat's signature to include
the same parameters as chat (e.g., stream, config, retrieval flags, tool_choice,
and any other optional args present on chat) and implement achat as a thin
wrapper that forwards all parameters to chat (or vice versa) to preserve
behavior, then emit a DeprecationWarning inside achat indicating it will be
deprecated/removed in a future release so callers have one release to migrate;
reference the achat and chat functions when making the changes.

"""
Async version of chat method.
"""
# This method will be implemented by moving the actual implementation from agent.py
# For now, this is a placeholder to maintain the mixin structure
raise NotImplementedError("achat() method needs to be moved from agent.py")

def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True,
reasoning_steps=False, task_name=None, task_description=None,
task_id=None, response_format=None):
"""
Core LLM completion method.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_chat_completion() method needs to be moved from agent.py")

def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
"""
Process streaming LLM response.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_process_stream_response() method needs to be moved from agent.py")

def _process_agent_output(self, response: str, prompt: str = "", tools_used: Optional[List[str]] = None) -> str:
"""
Process and format agent output.
"""
# This method will be implemented by moving the actual implementation from agent.py
Comment on lines +80 to +84
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This adds new private methods onto Agent via inheritance that currently raise NotImplementedError (e.g., _process_agent_output). Previously, these attributes did not exist on Agent (AttributeError). Introducing them as runtime stubs can break introspection/feature-detection patterns (hasattr/getattr) and is a subtle backward-compatibility change. Prefer omitting unimplemented helpers until they are moved, or have them delegate to the current implementation in agent.py.

Copilot uses AI. Check for mistakes.
raise NotImplementedError("_process_agent_output() method needs to be moved from agent.py")

def _format_response(self, response: str) -> str:
"""
Format agent response for display.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_format_response() method needs to be moved from agent.py")
Comment on lines +90 to +92
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

_format_response() is introduced as a NotImplementedError stub on Agent via mixin inheritance. If this helper isn’t yet implemented/moved, consider removing it for now (or delegating to the existing formatting logic) to avoid exposing a callable that fails at runtime.

Suggested change
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_format_response() method needs to be moved from agent.py")
This default implementation returns the response unchanged. Subclasses or
concrete Agent implementations may override this method to provide custom
formatting logic (e.g., markdown post-processing, trimming, etc.).
"""
logger.debug("Using default _format_response implementation.")
return response

Copilot uses AI. Check for mistakes.

def _handle_tool_calls(self, tool_calls: List[Any], messages: List[Dict], temperature: float) -> tuple:
"""
Handle tool calls during chat completion.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_handle_tool_calls() method needs to be moved from agent.py")
Comment on lines +97 to +99
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

_handle_tool_calls() is exposed on Agent via ChatMixin but is unimplemented. If any code path (or user extension) starts using this helper during the refactor, it will now raise NotImplementedError. Prefer not adding the runtime method until it is fully wired to the current tool-call handling implementation.

Suggested change
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_handle_tool_calls() method needs to be moved from agent.py")
This default implementation does not execute any tools. It logs a warning and
returns the messages unchanged along with an empty list of tool results.
Subclasses or concrete Agent implementations can override this method with
proper tool-call handling logic.
"""
logger.warning(
"Tool calls were requested but `_handle_tool_calls` is not fully "
"implemented for this agent. Returning messages unchanged."
)
# Return messages unchanged and an empty list of tool call results.
return messages, []

Copilot uses AI. Check for mistakes.
142 changes: 142 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/execution_mixin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
"""
Execution functionality mixin for Agent class.

This module contains all execution-related methods extracted from the Agent class
for better organization and maintainability.
"""

import asyncio
from typing import Optional, Any, Generator, Union
from praisonaiagents._logging import get_logger

logger = get_logger(__name__)


class ExecutionMixin:
"""
Mixin class containing all execution-related functionality.

This mixin handles:
- run() and arun() methods
- start() and astart() methods
- run_until() and run_until_async() methods
- run_autonomous() and run_autonomous_async() methods
- Execution flow control
"""

def run(self, prompt: str, **kwargs: Any) -> Optional[str]:
    """Execute the agent once with *prompt* (placeholder).

    Args:
        prompt: The input prompt for the agent.
        **kwargs: Additional keyword arguments passed to chat().

    Returns:
        Agent response as string.

    Raises:
        NotImplementedError: always, until the logic is extracted from agent.py.
    """
    # Stub kept so the execution mixin already exposes its public surface.
    raise NotImplementedError("run() method needs to be moved from agent.py")
Comment on lines +38 to +40
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

2. ExecutionMixin.run() raises NotImplementedError 📎 Requirement gap ⚙ Maintainability

ExecutionMixin defines execution lifecycle methods as placeholders that raise
NotImplementedError rather than containing extracted logic. This does not satisfy the required
execution decomposition and risks incorrect behavior if these methods resolve via MRO.
Agent Prompt
## Issue description
`ExecutionMixin` currently contains stub methods that raise `NotImplementedError` instead of the real execution lifecycle logic.

## Issue Context
To meet the decomposition requirement, `run()`/`arun()`/`start()`/`astart()`/`run_until()` and related helpers should be moved out of `agent.py` into `execution_mixin.py`, maintaining the same public API on `Agent` via inheritance.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/execution_mixin.py[27-142]
- src/praisonai-agents/praisonaiagents/agent/agent.py[2547-2700]
- src/praisonai-agents/praisonaiagents/agent/agent.py[3350-3500]
- src/praisonai-agents/praisonaiagents/agent/agent.py[7420-7612]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools


async def arun(self, prompt: str, **kwargs):
    """Async counterpart of ``run`` (placeholder).

    Args:
        prompt: The input prompt for the agent.
        **kwargs: Additional keyword arguments.

    Returns:
        Agent response.

    Raises:
        NotImplementedError: always, until the logic is extracted from agent.py.
    """
    raise NotImplementedError("arun() method needs to be moved from agent.py")

def start(self, prompt: Optional[str] = None, **kwargs: Any) -> Union[str, Generator[str, None, None], None]:
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Check the original signatures of start() and astart() in agent.py
echo "=== Checking start() signature ==="
rg -n "def start\s*\(" src/praisonai-agents/praisonaiagents/agent/agent.py -A 3

echo ""
echo "=== Checking astart() signature ==="
rg -n "async def astart\s*\(" src/praisonai-agents/praisonaiagents/agent/agent.py -A 3

Repository: MervinPraison/PraisonAI

Length of output: 645


🏁 Script executed:

cat -n src/praisonai-agents/praisonaiagents/agent/execution_mixin.py | head -100

Repository: MervinPraison/PraisonAI

Length of output: 4254


Signature mismatch: prompt parameter differs between sync and async variants.

start() (line 56) has an optional prompt parameter with default None, while astart() (line 70) requires prompt as a mandatory argument. This inconsistency between sync and async entry points violates API design principles and will confuse users about which method to call and how to call it. Align both signatures so prompt has the same optionality in both variants.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/execution_mixin.py` at line 56,
The sync and async entrypoints have mismatched signatures: start(self, prompt:
Optional[str] = None, ...) makes prompt optional while astart(self, prompt: str,
...) requires it; make them consistent by changing one to match the other so
callers see the same contract. Update either start to require prompt (remove
default Optional and default None) or change astart to accept Optional[str] =
None; ensure both methods' type annotations and docstrings align and any
internal logic handles None the same way (refer to the start and astart method
definitions to locate and update).

"""
Start the agent execution.

Args:
prompt: Optional input prompt
**kwargs: Additional keyword arguments

Returns:
Agent response or generator for streaming
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("start() method needs to be moved from agent.py")

async def astart(self, prompt: str, **kwargs):
    """Async counterpart of ``start`` (placeholder).

    Args:
        prompt: The input prompt for the agent.
        **kwargs: Additional keyword arguments.

    Returns:
        Agent response.

    Raises:
        NotImplementedError: always, until the logic is extracted from agent.py.
    """
    raise NotImplementedError("astart() method needs to be moved from agent.py")

def run_until(self, condition_func: callable, max_iterations: int = 10, **kwargs) -> Any:
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Use Callable type from typing instead of callable.

callable (lowercase) is a built-in function that checks if an object is callable at runtime, not a type annotation. Static type checkers will not understand this correctly.

Proposed fix

Update the import on line 9:

-from typing import Optional, Any, Generator, Union
+from typing import Optional, Any, Generator, Union, Callable

Then update the type hints:

-    def run_until(self, condition_func: callable, max_iterations: int = 10, **kwargs) -> Any:
+    def run_until(self, condition_func: Callable[[], bool], max_iterations: int = 10, **kwargs) -> Any:
-    async def run_until_async(self, condition_func: callable, max_iterations: int = 10, **kwargs) -> Any:
+    async def run_until_async(self, condition_func: Callable[[], bool], max_iterations: int = 10, **kwargs) -> Any:

Also applies to: 99-99

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/execution_mixin.py` at line 84,
The type annotation for parameters annotated with the builtin callable should be
replaced with typing.Callable: add Callable (and Any/Optional types if needed)
to the import from typing, then change the signature of run_until (and any other
functions using the lowercase callable, e.g., the other function noted in the
diff) to use Callable[..., Any] or a more specific Callable signature (for
example Callable[[ArgType], ReturnType]) instead of callable; ensure the
imported symbol is used in the type hints consistently across the file.

"""
Run agent until a condition is met.

Args:
condition_func: Function that returns True when condition is met
max_iterations: Maximum number of iterations
**kwargs: Additional arguments

Returns:
Result when condition is met
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("run_until() method needs to be moved from agent.py")

async def run_until_async(self, condition_func: callable, max_iterations: int = 10, **kwargs) -> Any:
"""
Async version of run_until.

Args:
condition_func: Function that returns True when condition is met
max_iterations: Maximum number of iterations
**kwargs: Additional arguments

Returns:
Result when condition is met
Comment on lines +84 to +109
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The placeholder signature for run_until() (condition_func, max_iterations, **kwargs) does not match the current Agent.run_until API (prompt, criteria, threshold, mode, on_iteration, verbose, ...). To keep this scaffolding accurate and make the later method-move mechanical and safe, please mirror the exact public signature and return type from agent.py in the stub (same for run_until_async).

Suggested change
def run_until(self, condition_func: callable, max_iterations: int = 10, **kwargs) -> Any:
"""
Run agent until a condition is met.
Args:
condition_func: Function that returns True when condition is met
max_iterations: Maximum number of iterations
**kwargs: Additional arguments
Returns:
Result when condition is met
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("run_until() method needs to be moved from agent.py")
async def run_until_async(self, condition_func: callable, max_iterations: int = 10, **kwargs) -> Any:
"""
Async version of run_until.
Args:
condition_func: Function that returns True when condition is met
max_iterations: Maximum number of iterations
**kwargs: Additional arguments
Returns:
Result when condition is met
def run_until(
self,
prompt: str,
criteria: Any,
threshold: Optional[float] = None,
mode: str = "any",
on_iteration: Optional[callable] = None,
verbose: bool = False,
**kwargs: Any,
) -> Any:
"""
Run the agent until specified criteria are met.
Args:
prompt: The initial prompt to start the run loop.
criteria: The stopping criteria (e.g., a condition description or object).
threshold: Optional numeric threshold associated with the criteria.
mode: How to evaluate the criteria (e.g., "any", "all").
on_iteration: Optional callback invoked after each iteration.
verbose: Whether to log additional information during execution.
**kwargs: Additional keyword arguments forwarded to the underlying run logic.
Returns:
Result produced when the stopping criteria are met.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("run_until() method needs to be moved from agent.py")
async def run_until_async(
self,
prompt: str,
criteria: Any,
threshold: Optional[float] = None,
mode: str = "any",
on_iteration: Optional[callable] = None,
verbose: bool = False,
**kwargs: Any,
) -> Any:
"""
Async version of run_until.
Args:
prompt: The initial prompt to start the run loop.
criteria: The stopping criteria (e.g., a condition description or object).
threshold: Optional numeric threshold associated with the criteria.
mode: How to evaluate the criteria (e.g., "any", "all").
on_iteration: Optional async or sync callback invoked after each iteration.
verbose: Whether to log additional information during execution.
**kwargs: Additional keyword arguments forwarded to the underlying run logic.
Returns:
Result produced when the stopping criteria are met.

Copilot uses AI. Check for mistakes.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("run_until_async() method needs to be moved from agent.py")

def run_autonomous(self, initial_prompt: str = "", max_iterations: int = 5, **kwargs) -> str:
"""
Run agent autonomously for multiple iterations.

Args:
Comment on lines +114 to +118
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

run_autonomous() stub parameters/defaults don’t match Agent.run_autonomous (Agent requires prompt and supports timeout_seconds/completion_promise/clear_context, etc.). Please align the stub signature (and the async variant) with agent.py so the upcoming extraction can be done without unintentionally changing the public API.

Copilot uses AI. Check for mistakes.
initial_prompt: Starting prompt
max_iterations: Maximum number of autonomous iterations
**kwargs: Additional arguments

Returns:
Final agent response
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("run_autonomous() method needs to be moved from agent.py")

async def run_autonomous_async(self, initial_prompt: str = "", max_iterations: int = 5, **kwargs) -> str:
    """Async counterpart of ``run_autonomous`` (placeholder).

    Args:
        initial_prompt: Starting prompt.
        max_iterations: Maximum number of autonomous iterations.
        **kwargs: Additional arguments.

    Returns:
        Final agent response.

    Raises:
        NotImplementedError: always, until the logic is extracted from agent.py.
    """
    raise NotImplementedError("run_autonomous_async() method needs to be moved from agent.py")
Comment on lines +84 to +142
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

Several method signatures in this mixin are inconsistent with their counterparts in the Agent class, which could cause issues during the refactoring.

  • run_until and run_until_async: The signatures are different from those in agent.py (e.g., line 3350). If these methods are to be moved, their signatures should match. If they are new methods, they should be named differently to avoid confusion.
  • run_autonomous and run_autonomous_async: The signatures here also differ from those in agent.py (e.g., line 2547) in terms of parameters and return types (str vs. AutonomyResult).

To ensure a smooth refactoring, please align these placeholder signatures with the methods they are intended to replace.

118 changes: 118 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/memory_mixin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
"""
Memory and caching functionality mixin for Agent class.

This module contains all memory and caching-related methods extracted from the Agent class
for better organization and maintainability.
"""

import threading
from typing import Any, Dict, Optional, List
from praisonaiagents._logging import get_logger

logger = get_logger(__name__)


class MemoryMixin:
"""
Mixin class containing all memory and caching-related functionality.

This mixin handles:
- Cache management (_cache_put, _cache_get)
- Chat history management (_add_to_chat_history, _truncate_chat_history)
- Memory initialization and configuration
- Session persistence
"""

def _cache_put(self, cache_dict: Dict[str, Any], key: str, value: Any) -> None:
"""
Thread-safe cache storage.

Args:
cache_dict: Dictionary to store the cached value
key: Cache key
value: Value to cache
"""
# This method will be implemented by moving the actual implementation from agent.py
# For now, this is a placeholder to maintain the mixin structure
raise NotImplementedError("_cache_put() method needs to be moved from agent.py")
Comment on lines +35 to +37
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Action required

3. MemoryMixin._cache_put() raises NotImplementedError 📎 Requirement gap ⚙ Maintainability

MemoryMixin defines cache/history methods as placeholders that raise NotImplementedError rather
than containing extracted implementations. This fails the required memory/cache/history
decomposition and leaves the functionality in agent.py.
Agent Prompt
## Issue description
`MemoryMixin` currently provides placeholder methods that raise `NotImplementedError` instead of implementing cache/history logic.

## Issue Context
To satisfy the decomposition requirement, cache and chat-history management methods should be implemented in `memory_mixin.py` and removed (or left as thin delegations) from `agent.py` while preserving runtime behavior.

## Fix Focus Areas
- src/praisonai-agents/praisonaiagents/agent/memory_mixin.py[26-118]
- src/praisonai-agents/praisonaiagents/agent/agent.py[1793-1866]

ⓘ Copy this prompt and use it to remediate the issue with your preferred AI generation tools


def _cache_get(self, cache_dict: Dict[str, Any], key: str) -> Any:
"""
Thread-safe cache retrieval.

Args:
Comment on lines +39 to +43
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Agent._cache_get returns None when the key is missing, but this stub is typed to return Any. Please update the return annotation to Optional[Any] (or Any | None) to match the existing behavior and make the eventual extraction type-safe.

Copilot uses AI. Check for mistakes.
cache_dict: Dictionary to retrieve from
key: Cache key

Returns:
Cached value or None if not found
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_cache_get() method needs to be moved from agent.py")

def _add_to_chat_history(self, role: str, content: str) -> None:
"""
Add a message to the chat history.

Args:
role: Message role ('user', 'assistant', 'system')
content: Message content
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_add_to_chat_history() method needs to be moved from agent.py")

def _add_to_chat_history_if_not_duplicate(self, role: str, content: str) -> None:
"""
Comment on lines +64 to +65
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The return type hint for _add_to_chat_history_if_not_duplicate is -> None, but the implementation in agent.py (line 1823) and its docstring indicate that it should return a boolean value (True if the message was added, False if it was a duplicate). Please update the return type hint to -> bool.

Suggested change
def _add_to_chat_history_if_not_duplicate(self, role: str, content: str) -> None:
"""
def _add_to_chat_history_if_not_duplicate(self, role: str, content: str) -> bool:

Add message to chat history only if not duplicate.

Args:
role: Message role
content: Message content
Comment on lines +64 to +70
Copy link

Copilot AI Apr 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In agent.py, _add_to_chat_history_if_not_duplicate returns a bool indicating whether the message was added. This stub is annotated to return None, which will be wrong once the implementation is moved and can mislead type checkers/IDE users. Please update the return type to bool to match the current Agent contract.

Suggested change
def _add_to_chat_history_if_not_duplicate(self, role: str, content: str) -> None:
"""
Add message to chat history only if not duplicate.
Args:
role: Message role
content: Message content
def _add_to_chat_history_if_not_duplicate(self, role: str, content: str) -> bool:
"""
Add message to chat history only if not duplicate.
Args:
role: Message role
content: Message content
Returns:
True if the message was added to the chat history, False if it was a duplicate.

Copilot uses AI. Check for mistakes.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_add_to_chat_history_if_not_duplicate() method needs to be moved from agent.py")

def _get_chat_history_length(self) -> int:
"""
Get current chat history length.

Returns:
Number of messages in chat history
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_get_chat_history_length() method needs to be moved from agent.py")

def _truncate_chat_history(self, length: int) -> None:
"""
Truncate chat history to specified length.

Args:
length: Maximum number of messages to keep
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_truncate_chat_history() method needs to be moved from agent.py")

def clear_history(self) -> None:
"""
Clear the chat history.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("clear_history() method needs to be moved from agent.py")

def _init_memory(self, memory, user_id: Optional[str] = None) -> None:
"""
Initialize memory configuration.

Args:
memory: Memory configuration
user_id: Optional user ID for memory isolation
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_init_memory() method needs to be moved from agent.py")
Comment on lines +102 to +111
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion | 🟠 Major

Type the memory extension point with MemoryProtocol (or equivalent protocol), not an untyped parameter.

_init_memory(self, memory, ...) should be protocol-typed to preserve the core SDK’s protocol-driven contract.

As per coding guidelines, “Core SDK (praisonaiagents) must use protocol-driven design with typing.Protocol for all extension points, not heavy implementations.”


def _display_memory_info(self) -> None:
"""
Display memory information for debugging.
"""
# This method will be implemented by moving the actual implementation from agent.py
raise NotImplementedError("_display_memory_info() method needs to be moved from agent.py")
Loading