Skip to content
5 changes: 5 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,10 @@ def __getattr__(name):
from .heartbeat import HeartbeatConfig
_lazy_cache[name] = HeartbeatConfig
return HeartbeatConfig
if name == 'InterruptController':
from .interrupt import InterruptController
_lazy_cache[name] = InterruptController
return InterruptController

# Specialized agents - lazy loaded (import rich)
if name == 'ImageAgent':
Expand Down Expand Up @@ -194,6 +198,7 @@ def __getattr__(name):
'BudgetExceededError',
'Heartbeat',
'HeartbeatConfig',
'InterruptController',
'ImageAgent',
'VideoAgent',
'VideoConfig',
Expand Down
35 changes: 33 additions & 2 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,7 @@ def _get_default_server_registry() -> ServerRegistry:
from ..context.models import ContextConfig
from ..context.manager import ContextManager
from ..knowledge.knowledge import Knowledge
from .interrupt import InterruptController
from ..agent.autonomy import AutonomyConfig
from ..task.task import Task
from .handoff import Handoff, HandoffConfig, HandoffResult
Expand Down Expand Up @@ -531,7 +532,7 @@ def __init__(
# CONSOLIDATED FEATURE PARAMS (agent-centric API)
# Each follows: False=disabled, True=defaults, Config=custom
# ============================================================
memory: Optional[Union[bool, str, 'MemoryConfig', 'MemoryManager']] = None,
memory: Optional[Union[bool, str, 'MemoryConfig', Any]] = None,
knowledge: Optional[Union[bool, str, List[str], 'KnowledgeConfig', 'Knowledge']] = None,
planning: Optional[Union[bool, str, 'PlanningConfig']] = False,
reflection: Optional[Union[bool, str, 'ReflectionConfig']] = None,
Expand All @@ -551,6 +552,7 @@ def __init__(
parallel_tool_calls: bool = False, # Gap 2: Enable parallel execution of batched LLM tool calls
learn: Optional[Union[bool, str, Dict[str, Any], 'LearnConfig']] = None, # Continuous learning (peer to memory)
backend: Optional[Any] = None, # External managed agent backend (e.g., ManagedAgentIntegration)
interrupt_controller: Optional['InterruptController'] = None, # G2: Cooperative cancellation
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
python -m ruff check src/praisonai-agents/praisonaiagents/agent/agent.py --select F821

Repository: MervinPraison/PraisonAI

Length of output: 1594


🏁 Script executed:

head -100 src/praisonai-agents/praisonaiagents/agent/agent.py | cat -n

Repository: MervinPraison/PraisonAI

Length of output: 4467


🏁 Script executed:

rg "TYPE_CHECKING" src/praisonai-agents/praisonaiagents/agent/agent.py -A 5 -B 2

Repository: MervinPraison/PraisonAI

Length of output: 734


🏁 Script executed:

rg "interrupt_controller" src/praisonai-agents/praisonaiagents/agent/agent.py -B 2 -A 2

Repository: MervinPraison/PraisonAI

Length of output: 1076


🏁 Script executed:

rg "InterruptController" src/praisonai-agents/praisonaiagents/agent/ --type py

Repository: MervinPraison/PraisonAI

Length of output: 970


🏁 Script executed:

fd "interrupt" src/praisonai-agents/praisonaiagents/ --type f

Repository: MervinPraison/PraisonAI

Length of output: 122


🏁 Script executed:

rg "MemoryManager" src/praisonai-agents/praisonaiagents/agent/agent.py -B 2 -A 2

Repository: MervinPraison/PraisonAI

Length of output: 731


🏁 Script executed:

rg "run_autonomous\(" src/praisonai-agents/praisonaiagents/agent/agent.py -A 20 | head -60

Repository: MervinPraison/PraisonAI

Length of output: 2551


🏁 Script executed:

rg "run_autonomous_async" src/praisonai-agents/praisonaiagents/agent/ -A 15 | head -80

Repository: MervinPraison/PraisonAI

Length of output: 7839


🏁 Script executed:

cat -n src/praisonai-agents/praisonaiagents/agent/agent.py | sed -n '530,545p'

Repository: MervinPraison/PraisonAI

Length of output: 1484


🏁 Script executed:

rg "class MemoryManager" src/praisonai-agents/praisonaiagents/ --type py

Repository: MervinPraison/PraisonAI

Length of output: 49


🏁 Script executed:

rg "from.*MemoryManager\|import.*MemoryManager" src/praisonai-agents/praisonaiagents/ --type py

Repository: MervinPraison/PraisonAI

Length of output: 49


🏁 Script executed:

cat -n src/praisonai-agents/praisonaiagents/agent/agent.py | sed -n '115,130p'

Repository: MervinPraison/PraisonAI

Length of output: 829


🏁 Script executed:

# Check if interrupt check is in the sync version
rg "def run_autonomous" src/praisonai-agents/praisonaiagents/agent/agent.py -A 100 | grep -A 80 "def run_autonomous("  | head -100

Repository: MervinPraison/PraisonAI

Length of output: 3612


🏁 Script executed:

# Get full run_autonomous method to check for interrupt_controller usage
rg "def run_autonomous\(" src/praisonai-agents/praisonaiagents/agent/agent.py -A 250 | grep -i "interrupt" | head -10

Repository: MervinPraison/PraisonAI

Length of output: 49


🏁 Script executed:

# Check what TYPE_CHECKING currently imports
cat -n src/praisonai-agents/praisonaiagents/agent/agent.py | sed -n '135,165p'

Repository: MervinPraison/PraisonAI

Length of output: 1466


🏁 Script executed:

# Get more lines from the autonomy loop in sync version
rg "def run_autonomous\(" src/praisonai-agents/praisonaiagents/agent/agent.py -A 180 | tail -100

Repository: MervinPraison/PraisonAI

Length of output: 4714


🏁 Script executed:

# Find where interrupt_controller is actually checked in async version
rg "interrupt_controller.*is_set" src/praisonai-agents/praisonaiagents/agent/ -B 5 -A 5

Repository: MervinPraison/PraisonAI

Length of output: 1236


🏁 Script executed:

# Show the complete current TYPE_CHECKING block
cat -n src/praisonai-agents/praisonaiagents/agent/agent.py | sed -n '175,210p'

Repository: MervinPraison/PraisonAI

Length of output: 1732


🏁 Script executed:

# Check if there's any import for MemoryManager in TYPE_CHECKING
rg "MemoryManager" src/praisonai-agents/praisonaiagents/ --type py | grep -i "import\|from"

Repository: MervinPraison/PraisonAI

Length of output: 49


🏁 Script executed:

# Find the exact TYPE_CHECKING block
rg "if TYPE_CHECKING:" src/praisonai-agents/praisonaiagents/agent/agent.py -A 20

Repository: MervinPraison/PraisonAI

Length of output: 1145


🏁 Script executed:

# Check if there are other F821 errors besides the two found
python -m ruff check src/praisonai-agents/praisonaiagents/agent/agent.py --select F821 2>&1 | cat

Repository: MervinPraison/PraisonAI

Length of output: 1594


🏁 Script executed:

# Look for where MemoryManager is defined or if it exists
fd "memory" src/praisonai-agents/praisonaiagents/ --type f | grep -i manager

Repository: MervinPraison/PraisonAI

Length of output: 49


🏁 Script executed:

# Check if PlanningConfig exists
rg "class PlanningConfig" src/praisonai-agents/praisonaiagents/ --type py

Repository: MervinPraison/PraisonAI

Length of output: 151


🏁 Script executed:

# Verify if MemoryManager is supposed to exist or if it's a typo/unused type hint
rg "MemoryManager" src/praisonai-agents/praisonaiagents/ --type py -B 3 -A 3

Repository: MervinPraison/PraisonAI

Length of output: 3172


🏁 Script executed:

# Check what other types are in the union at line 534 and see if they're all in TYPE_CHECKING
rg "memory.*Optional.*Union" src/praisonai-agents/praisonaiagents/agent/agent.py -A 2

Repository: MervinPraison/PraisonAI

Length of output: 319


🏁 Script executed:

# Get full list of undefined types mentioned in F821 errors to assess scope
python -m ruff check src/praisonai-agents/praisonaiagents/agent/agent.py --select F821 2>&1 | grep "Undefined name" | cut -d'`' -f2 | sort | uniq

Repository: MervinPraison/PraisonAI

Length of output: 100


Add missing type imports for static analysis, and fix MemoryManager union type.

Ruff reports two F821 errors: InterruptController and MemoryManager. Add InterruptController to the TYPE_CHECKING block; for MemoryManager, use Any instead since it's not a defined class—it's a placeholder for pre-configured instances.

Proposed fix
 if TYPE_CHECKING:
     from ..approval.protocols import ApprovalConfig, ApprovalProtocol
     from ..config.feature_configs import LearnConfig, MemoryConfig
     from ..context.models import ContextConfig
     from ..context.manager import ContextManager
     from ..knowledge.knowledge import Knowledge
+    from .interrupt import InterruptController
     from ..agent.autonomy import AutonomyConfig
     from ..task.task import Task
     from .handoff import Handoff, HandoffConfig, HandoffResult
     from ..rag.models import RAGResult, ContextPack
     from ..eval.results import EvaluationLoopResult
-        memory: Optional[Union[bool, str, 'MemoryConfig', 'MemoryManager']] = None,
+        memory: Optional[Union[bool, str, 'MemoryConfig', Any]] = None,
🧰 Tools
🪛 Ruff (0.15.10)

[error] 554-554: Undefined name InterruptController

(F821)

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/agent.py` at line 554, Ruff flags
undefined names InterruptController and MemoryManager in the signature; update
the static types by adding InterruptController to the TYPE_CHECKING imports (so
it’s available only for typing) and replace the MemoryManager union/type with
typing.Any (and import Any if not present) in the function/class where
interrupt_controller: Optional['InterruptController'] = None and any
parameters/attributes typed as MemoryManager; this removes the unresolved
MemoryManager symbol and ensures InterruptController is recognized for static
analysis.

):
"""Initialize an Agent instance.

Expand All @@ -575,7 +577,7 @@ def __init__(
memory: Memory system configuration. Accepts:
- bool: True enables defaults, False disables
- MemoryConfig: Custom configuration
- MemoryManager: Pre-configured instance
- Any: Pre-configured memory instance
knowledge: Knowledge sources. Accepts:
- bool: True enables defaults
- List[str]: File paths, URLs, or text content
Expand Down Expand Up @@ -1458,6 +1460,8 @@ def __init__(
self.instructions = instructions
# Gap 2: Store parallel tool calls setting for ToolCallExecutor selection
self.parallel_tool_calls = parallel_tool_calls
# G2: Store interrupt controller for cooperative cancellation
self.interrupt_controller = interrupt_controller
# Check for model name in environment variable if not provided
self._using_custom_llm = False
# Flag to track if final result has been displayed to prevent duplicates
Expand Down Expand Up @@ -2835,6 +2839,19 @@ def run_autonomous(
started_at=started_at,
)

# G2: Check for interrupt request (cooperative cancellation) - sync version
if self.interrupt_controller and self.interrupt_controller.is_set():
reason = self.interrupt_controller.reason or "unknown"
return AutonomyResult(
success=False,
output=f"Task interrupted: {reason}",
completion_reason="interrupted",
iterations=iterations,
stage=stage,
actions=actions_taken,
duration_seconds=time_module.time() - start_time,
started_at=started_at,
)

# Execute one turn using the agent's chat method
# Always use the original prompt (prompt re-injection)
Expand Down Expand Up @@ -3232,6 +3249,20 @@ async def main():
started_at=started_at,
)

# G2: Check for interrupt request (cooperative cancellation)
if self.interrupt_controller and self.interrupt_controller.is_set():
reason = self.interrupt_controller.reason or "unknown"
return AutonomyResult(
success=False,
output=f"Task interrupted: {reason}",
completion_reason="interrupted",
iterations=iterations,
stage=stage,
actions=actions_taken,
duration_seconds=time_module.time() - start_time,
started_at=started_at,
)
Comment on lines +3252 to +3264
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Mirror interruption in the sync autonomous loop.

interrupt_controller only stops run_autonomous_async(). Users calling run_autonomous() with the same agent will ignore cancellation until another mechanism stops it.

Apply the same guard in `run_autonomous()` before `self.chat(prompt)`
                 if timeout_seconds and (time_module.time() - start_time) > timeout_seconds:
                     return AutonomyResult(
                         success=False,
                         output="Task timed out",
                         completion_reason="timeout",
@@
                         started_at=started_at,
                     )
+
+                if self.interrupt_controller and self.interrupt_controller.is_set():
+                    reason = self.interrupt_controller.reason or "unknown"
+                    return AutonomyResult(
+                        success=False,
+                        output=f"Task interrupted: {reason}",
+                        completion_reason="interrupted",
+                        iterations=iterations,
+                        stage=stage,
+                        actions=actions_taken,
+                        duration_seconds=time_module.time() - start_time,
+                        started_at=started_at,
+                    )

As per coding guidelines, Agent parameters must support both sync and async entry points.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/agent.py` around lines 3238 -
3250, The sync entrypoint run_autonomous() currently ignores
interrupt_controller so add the same cooperative-cancellation guard used in
run_autonomous_async(): before invoking self.chat(prompt) check if
self.interrupt_controller and self.interrupt_controller.is_set(), capture reason
= self.interrupt_controller.reason or "unknown", and return an AutonomyResult
with success=False, output like f"Task interrupted: {reason}",
completion_reason="interrupted", and preserve iterations, stage, actions_taken,
duration_seconds (time_module.time() - start_time) and started_at so the sync
loop mirrors the async behavior.



# Execute one turn using the agent's async chat method
# Always use the original prompt (prompt re-injection)
Expand Down
105 changes: 105 additions & 0 deletions src/praisonai-agents/praisonaiagents/agent/interrupt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
"""
Interrupt Controller - Cooperative cancellation for agent runs.

Provides thread-safe, cooperative cancellation mechanism for long-running agent
operations. Follows protocol-driven design with zero overhead when not used.
"""

import threading
from typing import Optional, Protocol
from dataclasses import dataclass, field

__all__ = ["InterruptControllerProtocol", "InterruptController"]


class InterruptControllerProtocol(Protocol):
    """Structural type for pluggable interrupt/cancellation controllers.

    Any object exposing these members may be supplied wherever an
    interrupt controller is accepted; no inheritance is required
    (typing.Protocol uses structural subtyping).
    """

    def request(self, reason: str = "user") -> None:
        """Signal that the running operation should stop, with a cause."""
        ...

    def clear(self) -> None:
        """Reset the controller to the not-interrupted state."""
        ...

    def is_set(self) -> bool:
        """Return True once an interrupt has been requested."""
        ...

    @property
    def reason(self) -> Optional[str]:
        """Cause supplied with the interrupt request, or None."""
        ...

    def check(self) -> None:
        """Raise if an interrupt has been requested; otherwise a no-op."""
        ...


@dataclass
class InterruptController:
Comment on lines +40 to +41
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion | 🟠 Major

Add a protocol for the interrupt extension point.

Agent accepts this as a pluggable cancellation surface, so expose a lightweight protocol alongside the default implementation.

Proposed protocol surface
-from typing import Optional
+from typing import Optional, Protocol
 from dataclasses import dataclass, field
 
-__all__ = ["InterruptController"]
+__all__ = ["InterruptControllerProtocol", "InterruptController"]
+
+
+class InterruptControllerProtocol(Protocol):
+    def request(self, reason: str = "user") -> None: ...
+    def clear(self) -> None: ...
+    def is_set(self) -> bool: ...
+
+    `@property`
+    def reason(self) -> Optional[str]: ...
+
+    def check(self) -> None: ...

As per coding guidelines, Core SDK must use protocol-driven design with typing.Protocol for all extension points, not heavy implementations.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/interrupt.py` around lines 15 -
16, Add a lightweight typing.Protocol for the interrupt extension point (e.g.,
name it InterruptControllerProtocol) in the same module and import Protocol from
typing; define the protocol with the same public method signatures used by Agent
and the existing class (match methods declared on InterruptController), then
have the concrete InterruptController class formally implement/extend that
protocol so Agent can accept the protocol type as its pluggable cancellation
surface. Ensure the protocol is exported/available for type hints (and update
any Agent type annotations to use InterruptControllerProtocol where
appropriate).

"""Thread-safe cooperative cancellation for agent runs.

Provides a lightweight mechanism for requesting cancellation of agent
operations. Uses threading.Event for thread safety and cooperative
checking patterns.

Examples:
Basic usage:
>>> controller = InterruptController()
>>> # In another thread:
>>> controller.request("user_cancel")
>>> # In agent loop:
>>> if controller.is_set():
...     return f"Cancelled: {controller.reason}"
"""

# Event set when an interrupt is requested; Event.is_set() is safe to
# poll from any thread without taking _lock.
_flag: threading.Event = field(default_factory=threading.Event, init=False, repr=False)
# Human-readable cause supplied to request(); reads/writes go through
# _lock so reason and flag stay consistent with each other.
_reason: Optional[str] = field(default=None, init=False)
# Guards compound updates of _flag plus _reason.
_lock: threading.Lock = field(default_factory=threading.Lock, init=False, repr=False)

def request(self, reason: str = "user") -> None:
    """Ask for cancellation; the first caller's reason wins.

    Calls made while an interrupt is already pending are ignored, so
    the originally supplied reason is preserved.

    Args:
        reason: Short human-readable cause (defaults to "user").
    """
    with self._lock:
        already_requested = self._flag.is_set()
        if already_requested:
            return
        self._reason = reason
        self._flag.set()

def clear(self) -> None:
    """Reset to the not-interrupted state, discarding any stored reason."""
    with self._lock:
        self._flag.clear()
        self._reason = None

def is_set(self) -> bool:
    """Report whether an interrupt has been requested.

    Lock-free: delegates to the underlying event's atomic flag, so it
    is cheap enough to poll inside tight loops.

    Returns:
        True once request() has been called and not yet cleared.
    """
    return self._flag.is_set()

@property
def reason(self) -> Optional[str]:
    """Cause given at interrupt time, or None when not interrupted.

    Read under the lock so it is consistent with concurrent
    request()/clear() calls.
    """
    with self._lock:
        current = self._reason
    return current

def check(self) -> None:
    """Raise InterruptedError if cancellation has been requested.

    Intended as a cheap guard sprinkled through long-running loops;
    it is a no-op in the common (not interrupted) case.

    Raises:
        InterruptedError: Carrying the stored reason (or "unknown").
    """
    if not self.is_set():
        return
    cause = self.reason or "unknown"
    raise InterruptedError(f"Operation cancelled: {cause}")
29 changes: 28 additions & 1 deletion src/praisonai-agents/praisonaiagents/llm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,26 @@ def __getattr__(name):
from .unified_adapters import create_llm_dispatcher
_lazy_cache[name] = create_llm_dispatcher
return create_llm_dispatcher
elif name == "sanitize_messages":
from .sanitize import sanitize_messages
_lazy_cache[name] = sanitize_messages
return sanitize_messages
elif name == "strip_surrogates":
from .sanitize import strip_surrogates
_lazy_cache[name] = strip_surrogates
return strip_surrogates
elif name == "sanitize_text":
from .sanitize import sanitize_text
_lazy_cache[name] = sanitize_text
return sanitize_text
elif name == "ErrorCategory":
from .error_classifier import ErrorCategory
_lazy_cache[name] = ErrorCategory
return ErrorCategory
elif name == "classify_error":
from .error_classifier import classify_error
_lazy_cache[name] = classify_error
return classify_error

raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Expand Down Expand Up @@ -199,5 +219,12 @@ def __getattr__(name):
"LLMProviderError",
"RateLimitError",
"ModelNotAvailableError",
"ContextLengthExceededError"
"ContextLengthExceededError",
# Sanitization (G6)
"sanitize_messages",
"strip_surrogates",
"sanitize_text",
# Error Classification (G5)
"ErrorCategory",
"classify_error"
]
Loading