Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 13 additions & 5 deletions src/praisonai-agents/praisonaiagents/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,10 +176,10 @@ def _get_lazy_cache():
'HandoffResult': ('praisonaiagents.agent.handoff', 'HandoffResult'),
'HandoffInputData': ('praisonaiagents.agent.handoff', 'HandoffInputData'),
'ContextPolicy': ('praisonaiagents.agent.handoff', 'ContextPolicy'),
'HandoffError': ('praisonaiagents.agent.handoff', 'HandoffError'),
'HandoffCycleError': ('praisonaiagents.agent.handoff', 'HandoffCycleError'),
'HandoffDepthError': ('praisonaiagents.agent.handoff', 'HandoffDepthError'),
'HandoffTimeoutError': ('praisonaiagents.agent.handoff', 'HandoffTimeoutError'),
'HandoffError': ('praisonaiagents.errors', 'HandoffError'),
'HandoffCycleError': ('praisonaiagents.errors', 'HandoffCycleError'),
'HandoffDepthError': ('praisonaiagents.errors', 'HandoffDepthError'),
'HandoffTimeoutError': ('praisonaiagents.errors', 'HandoffTimeoutError'),

# Embedding API (Note: embedding/embeddings handled in custom_handler to override subpackage)
'aembedding': ('praisonaiagents.embedding.embed', 'aembedding'),
Expand All @@ -203,7 +203,15 @@ def _get_lazy_cache():

# Agent classes
'Agent': ('praisonaiagents.agent.agent', 'Agent'),
'BudgetExceededError': ('praisonaiagents.agent.agent', 'BudgetExceededError'),
'BudgetExceededError': ('praisonaiagents.errors', 'BudgetExceededError'),

# Error hierarchy - structured exception handling
'PraisonAIError': ('praisonaiagents.errors', 'PraisonAIError'),
'ToolExecutionError': ('praisonaiagents.errors', 'ToolExecutionError'),
'LLMError': ('praisonaiagents.errors', 'LLMError'),
'ValidationError': ('praisonaiagents.errors', 'ValidationError'),
'NetworkError': ('praisonaiagents.errors', 'NetworkError'),
'ErrorContextProtocol': ('praisonaiagents.errors', 'ErrorContextProtocol'),
'Heartbeat': ('praisonaiagents.agent.heartbeat', 'Heartbeat'),
'HeartbeatConfig': ('praisonaiagents.agent.heartbeat', 'HeartbeatConfig'),
'ImageAgent': ('praisonaiagents.agent.image_agent', 'ImageAgent'),
Expand Down
19 changes: 2 additions & 17 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,23 +187,8 @@ def _is_file_path(value: str) -> bool:
from ..rag.models import RAGResult, ContextPack
from ..eval.results import EvaluationLoopResult

class BudgetExceededError(Exception):
    """Signals that an agent's accumulated spend has reached or passed its cap.

    The raw figures are kept on the instance so callers can inspect them:

        try:
            agent.start("...")
        except BudgetExceededError as e:
            print(f"Agent '{e.agent_name}' spent ${e.total_cost:.4f} of ${e.max_budget:.4f}")
    """

    def __init__(self, agent_name: str, total_cost: float, max_budget: float):
        # Stash the structured fields before building the human-readable message.
        self.agent_name = agent_name
        self.total_cost = total_cost
        self.max_budget = max_budget
        message = (
            f"Agent '{agent_name}' exceeded budget: "
            f"${total_cost:.4f} >= ${max_budget:.4f}"
        )
        super().__init__(message)
# Import structured error from central errors module
from ..errors import BudgetExceededError

class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin, ChatMixin, ExecutionMixin, MemoryMixin):
# Class-level counter for generating unique display names for nameless agents
Expand Down
11 changes: 10 additions & 1 deletion src/praisonai-agents/praisonaiagents/agent/chat_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from praisonaiagents._logging import get_logger
import asyncio
import threading
from ..errors import BudgetExceededError

# Fallback helpers to avoid circular imports
def _get_console():
Expand Down Expand Up @@ -666,7 +667,13 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r
self._llm_call_count += 1
if self._max_budget and self._total_cost >= self._max_budget:
if self._on_budget_exceeded == "stop":
raise BudgetExceededError(self.name, self._total_cost, self._max_budget)
raise BudgetExceededError(
f"Agent '{self.name}' exceeded budget: ${self._total_cost:.4f} >= ${self._max_budget:.4f}",
budget_type="cost",
limit=self._max_budget,
used=self._total_cost,
agent_id=self.name
)
Comment on lines +670 to +676
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
set -euo pipefail

FILE="src/praisonai-agents/praisonaiagents/agent/chat_mixin.py"
rg -n -C2 'BudgetExceededError|except Exception' "$FILE"

Repository: MervinPraison/PraisonAI

Length of output: 14466


🏁 Script executed:

# Check if BudgetExceededError is defined anywhere in the codebase
rg -n "class BudgetExceededError" --type py

Repository: MervinPraison/PraisonAI

Length of output: 160


🏁 Script executed:

# Check imports at the top of chat_mixin.py
head -50 src/praisonai-agents/praisonaiagents/agent/chat_mixin.py | grep -n "import"

Repository: MervinPraison/PraisonAI

Length of output: 527


🏁 Script executed:

# Read lines 660-710 for full context of the budget check and exception handling
sed -n '660,710p' src/praisonai-agents/praisonaiagents/agent/chat_mixin.py | cat -n

Repository: MervinPraison/PraisonAI

Length of output: 2739


🏁 Script executed:

# Check what the test expects
rg -n "BudgetExceededError" src/praisonai-agents/tests/unit/test_token_budget.py -A 3 -B 3

Repository: MervinPraison/PraisonAI

Length of output: 1167


🏁 Script executed:

# Read the BudgetExceededError class definition
sed -n '114,140p' src/praisonai-agents/praisonaiagents/errors.py | cat -n

Repository: MervinPraison/PraisonAI

Length of output: 990


🏁 Script executed:

# Check if BudgetExceededError is re-exported from agent module
rg -n "BudgetExceededError" src/praisonai-agents/praisonaiagents/agent/__init__.py src/praisonai-agents/praisonaiagents/agent/agent.py 2>/dev/null | head -20

Repository: MervinPraison/PraisonAI

Length of output: 649


🏁 Script executed:

# Check what exception handling wraps the budget check - full method context
sed -n '600,750p' src/praisonai-agents/praisonaiagents/agent/chat_mixin.py | cat -n

Repository: MervinPraison/PraisonAI

Length of output: 8856


Add missing import and fix budget-exceeded exception propagation.

BudgetExceededError is raised at line 670 but never imported, causing a NameError. The generic except Exception handler at line 702 will catch this before the caller sees it. Additionally, budget_type="tokens" is semantically incorrect—the values being passed (_max_budget and _total_cost) are in USD, not tokens.

Required fixes

Add the import:

+from ..errors import BudgetExceededError

Fix the parameter:

                     raise BudgetExceededError(
                         f"Agent '{self.name}' exceeded budget: ${self._total_cost:.4f} >= ${self._max_budget:.4f}",
-                        budget_type="tokens",
+                        budget_type="cost",
                         limit=self._max_budget,
                         used=self._total_cost,
                         agent_id=self.name
                     )

Add explicit re-raise in the generic handler at line 702:

except BudgetExceededError:
    raise
except Exception as e:
    # existing overflow recovery logic
🧰 Tools
🪛 Ruff (0.15.9)

[error] 669-669: Undefined name BudgetExceededError

(F821)

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/chat_mixin.py` around lines 669 -
675, The raise of BudgetExceededError in the chat mixin uses an undefined symbol
and the wrong budget_type and is swallowed by a broad exception handler; import
BudgetExceededError at the top of the module, change the raised error's
budget_type from "tokens" to "usd" (or "USD") while keeping
limit=self._max_budget and used=self._total_cost, and modify the exception
handling around the method that contains the raise (the generic except block
that currently catches all exceptions) to explicitly re-raise
BudgetExceededError (add an `except BudgetExceededError: raise`) before the
existing `except Exception as e:` recovery logic so the caller can observe the
budget-exceeded condition.

elif self._on_budget_exceeded == "warn":
logging.warning(
f"[budget] {self.name}: ${self._total_cost:.4f} exceeded "
Expand All @@ -692,6 +699,8 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r

return final_response

except BudgetExceededError:
raise
except Exception as e:
error_str = str(e).lower()

Expand Down
56 changes: 30 additions & 26 deletions src/praisonai-agents/praisonaiagents/agent/handoff.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,29 +97,13 @@ def from_dict(cls, data: Dict[str, Any]) -> 'HandoffConfig':
data["context_policy"] = ContextPolicy(data["context_policy"])
return cls(**{k: v for k, v in data.items() if k in cls.__dataclass_fields__})

class HandoffError(Exception):
"""Base exception for handoff errors."""
pass

class HandoffCycleError(HandoffError):
"""Raised when a cycle is detected in handoff chain."""
def __init__(self, chain: List[str]):
self.chain = chain
super().__init__(f"Handoff cycle detected: {' -> '.join(chain)}")

class HandoffDepthError(HandoffError):
"""Raised when max handoff depth is exceeded."""
def __init__(self, depth: int, max_depth: int):
self.depth = depth
self.max_depth = max_depth
super().__init__(f"Max handoff depth exceeded: {depth} > {max_depth}")

class HandoffTimeoutError(HandoffError):
"""Raised when handoff times out."""
def __init__(self, timeout: float, agent_name: str):
self.timeout = timeout
self.agent_name = agent_name
super().__init__(f"Handoff to {agent_name} timed out after {timeout}s")
# Import structured error hierarchy from central errors module
from ..errors import (
HandoffError,
HandoffCycleError,
HandoffDepthError,
HandoffTimeoutError
)

# Thread-local storage for tracking handoff chains
_handoff_context = threading.local()
Expand Down Expand Up @@ -267,12 +251,26 @@ def _check_safety(self, source_agent: 'Agent') -> None:
if self.config.detect_cycles:
chain = _get_handoff_chain()
if target_name in chain:
raise HandoffCycleError(chain + [target_name])
cycle_path = chain + [target_name]
raise HandoffCycleError(
f"Handoff cycle detected: {' -> '.join(cycle_path)}",
source_agent=source_agent.name if hasattr(source_agent, 'name') else 'unknown',
target_agent=target_name,
agent_id=source_agent.name if hasattr(source_agent, 'name') else 'unknown',
cycle_path=cycle_path
Comment on lines +255 to +260
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Migrate the timeout branch to the same constructor contract.

These two raises were updated to the centralized, keyword-based shape, but Line 539 still calls HandoffTimeoutError(self.config.timeout_seconds, self.agent.name) with the old positional order. That turns the timeout into the message and drops the structured handoff context.

🐛 Suggested fix
-            raise HandoffTimeoutError(self.config.timeout_seconds, self.agent.name)
+            raise HandoffTimeoutError(
+                f"Handoff to {self.agent.name} timed out after {self.config.timeout_seconds}s",
+                timeout_seconds=self.config.timeout_seconds,
+                source_agent=source_agent.name if hasattr(source_agent, "name") else "unknown",
+                target_agent=self.agent.name,
+                agent_id=source_agent.name if hasattr(source_agent, "name") else "unknown",
+            )

Also applies to: 266-273

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agent/handoff.py` around lines 255 -
260, The HandoffTimeoutError is being called with old positional args so the
timeout becomes the message and structured context is lost; update both places
that call HandoffTimeoutError(self.config.timeout_seconds, self.agent.name) to
use the keyword-based constructor shape (e.g.
HandoffTimeoutError(timeout_seconds=self.config.timeout_seconds,
agent_name=self.agent.name)) so the error preserves the structured fields
consistent with how HandoffCycleError is constructed.

)

# Check depth
current_depth = _get_handoff_depth()
if current_depth >= self.config.max_depth:
raise HandoffDepthError(current_depth + 1, self.config.max_depth)
raise HandoffDepthError(
f"Max handoff depth exceeded: {current_depth + 1} > {self.config.max_depth}",
source_agent=source_agent.name if hasattr(source_agent, 'name') else 'unknown',
target_agent=target_name,
agent_id=source_agent.name if hasattr(source_agent, 'name') else 'unknown',
max_depth=self.config.max_depth,
current_depth=current_depth + 1
)

def _prepare_context(self, source_agent: 'Agent', kwargs: Dict[str, Any]) -> HandoffInputData:
"""
Expand Down Expand Up @@ -538,7 +536,13 @@ async def _execute():
handoff_depth=_get_handoff_depth(),
)
self._execute_callback(self.config.on_error, source_agent, kwargs, result)
raise HandoffTimeoutError(self.config.timeout_seconds, self.agent.name)
raise HandoffTimeoutError(
f"Handoff to {self.agent.name} timed out after {self.config.timeout_seconds}s",
timeout_seconds=self.config.timeout_seconds,
source_agent=source_agent.name if hasattr(source_agent, "name") else "unknown",
target_agent=self.agent.name,
agent_id=source_agent.name if hasattr(source_agent, "name") else "unknown"
)
except Exception as e:
result = HandoffResult(
success=False,
Expand Down
196 changes: 99 additions & 97 deletions src/praisonai-agents/praisonaiagents/agents/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,93 @@ def get_multimodal_message(text_prompt: str, images: list) -> list:
})
return content

def _prepare_task_prompt(task, task_description, context_text=None):
"""
Prepare task prompt with context and memory (DRY helper).
"""
# Build task prompt - only use "User Input/Topic" format if there's actual content
if context_text and context_text.strip():
task_prompt = f"""
User Input/Topic: {context_text}

Task: {task_description}
Expected Output: {task.expected_output}

IMPORTANT: Your response must be about the user's input/topic above. Incorporate it into your task."""
else:
task_prompt = f"""
You need to do the following task: {task_description}.
Expected Output: {task.expected_output}."""

# Add memory context if available
if task.memory:
try:
memory_context = task.memory.build_context_for_task(task.description)
if memory_context:
Comment on lines +129 to +133
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Use the substituted description for memory lookup.

task_description already contains the variable-expanded prompt text, but memory retrieval still queries task.description. Templated tasks will pull memory against the unresolved template instead of the actual request.

🔧 Suggested fix
-            memory_context = task.memory.build_context_for_task(task.description)
+            memory_context = task.memory.build_context_for_task(task_description)
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agents/agents.py` around lines 129 -
133, The memory lookup is using the unresolved template (task.description)
instead of the expanded prompt; change the call to memory.build_context_for_task
to pass the substituted text variable (task_description) rather than
task.description so templated tasks retrieve memory against the actual,
variable-expanded request (update the block around the if task.memory check
where memory_context is assigned).

# Log detailed memory context for debugging
logger.debug(f"Memory context for task '{task.description}': {memory_context}")
# Include actual memory content without verbose headers (essential for AI agent functionality)
task_prompt += f"\n\n{memory_context}"
except Exception as e:
logger.error(f"Error getting memory context: {e}")

task_prompt += "\nPlease provide only the final result of your work. Do not add any conversation or extra explanation."
return task_prompt

def _execute_with_agent_sync(executor_agent, task_prompt, task, tools, stream=None):
"""
Execute task with agent synchronously (DRY helper).
"""
if task.images:
return executor_agent.chat(
get_multimodal_message(task_prompt, task.images),
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
stream=stream,
task_name=task.name,
task_description=task.description,
task_id=task.id
)
else:
return executor_agent.chat(
task_prompt,
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
stream=stream,
task_name=task.name,
task_description=task.description,
task_id=task.id
)

async def _execute_with_agent_async(executor_agent, task_prompt, task, tools, stream=None):
"""
Execute task with agent asynchronously (DRY helper).
"""
if task.images:
return await executor_agent.achat(
get_multimodal_message(task_prompt, task.images),
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
stream=stream,
task_name=task.name,
task_description=task.description,
task_id=task.id
)
else:
return await executor_agent.achat(
task_prompt,
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
stream=stream,
task_name=task.name,
task_description=task.description,
task_id=task.id
)

def process_task_context(context_item, verbose=0, user_id=None):
"""
Process a single context item for task execution.
Expand Down Expand Up @@ -703,59 +790,17 @@ async def aexecute_task(self, task_id):
context_separator = '\n\n'
context_text = context_separator.join(unique_contexts)

# Build task prompt - only use "User Input/Topic" format if there's actual content
if context_text and context_text.strip():
task_prompt = f"""
User Input/Topic: {context_text}

Task: {task_description}
Expected Output: {task.expected_output}

IMPORTANT: Your response must be about the user's input/topic above. Incorporate it into your task."""
else:
task_prompt = f"""
You need to do the following task: {task_description}.
Expected Output: {task.expected_output}."""

# Add memory context if available
if task.memory:
try:
memory_context = task.memory.build_context_for_task(task.description)
if memory_context:
# Log detailed memory context for debugging
logger.debug(f"Memory context for task '{task.description}': {memory_context}")
# Include actual memory content without verbose headers (essential for AI agent functionality)
task_prompt += f"\n\n{memory_context}"
except Exception as e:
logger.error(f"Error getting memory context: {e}")

task_prompt += "\nPlease provide only the final result of your work. Do not add any conversation or extra explanation."
# Build task prompt using DRY helper
task_prompt = _prepare_task_prompt(task, task_description, context_text)

if self.verbose >= 2:
logger.info(f"Executing task {task_id}: {task_description} using {executor_agent.display_name}")
logger.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")

if task.images:
# Use shared multimodal helper (DRY - defined at module level)
agent_output = await executor_agent.achat(
get_multimodal_message(task_prompt, task.images),
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
task_name=task.name,
task_description=task.description,
task_id=task.id
)
else:
agent_output = await executor_agent.achat(
task_prompt,
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
task_name=task.name,
task_description=task.description,
task_id=task.id
)
# Execute with agent using DRY helper (fixes missing stream parameter)
agent_output = await _execute_with_agent_async(
executor_agent, task_prompt, task, tools, stream=self.stream
)
Comment on lines +800 to +803
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Don’t override agent-level streaming with a team-wide default.

These calls now always pass stream=self.stream, but the generator resolves streaming per role and stores it on each PraisonAgent.output. An explicit stream= kwarg here prevents chat() / achat() from falling back to the current agent’s setting, so mixed per-agent YAML configs can’t be honored and uniform agent configs now depend on the AgentTeam default instead.

Also applies to: 1121-1124

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai-agents/praisonaiagents/agents/agents.py` around lines 800 -
803, The call to _execute_with_agent_async is forcing a team-wide stream setting
by passing stream=self.stream, which overrides per-agent streaming stored on
each PraisonAgent.output and prevents chat()/achat() from falling back to the
agent’s own setting; remove the explicit stream=self.stream kwarg from this call
(and the other similar call sites around the other agent execution block) so
_execute_with_agent_async can use the agent-level/default behavior and allow
PraisonAgent.output and chat()/achat() to determine streaming per role.


if agent_output:
# Store the response in memory
Expand Down Expand Up @@ -1066,60 +1111,17 @@ def execute_task(self, task_id):
context_separator = '\n\n'
context_text = context_separator.join(unique_contexts)

# Build task prompt - only use "User Input/Topic" format if there's actual content
if context_text and context_text.strip():
task_prompt = f"""
User Input/Topic: {context_text}

Task: {task_description}
Expected Output: {task.expected_output}

IMPORTANT: Your response must be about the user's input/topic above. Incorporate it into your task."""
else:
task_prompt = f"""
You need to do the following task: {task_description}.
Expected Output: {task.expected_output}."""

# Add memory context if available
if task.memory:
try:
memory_context = task.memory.build_context_for_task(task.description)
if memory_context:
# Log detailed memory context for debugging
logger.debug(f"Memory context for task '{task.description}': {memory_context}")
# Include actual memory content without verbose headers (essential for AI agent functionality)
task_prompt += f"\n\n{memory_context}"
except Exception as e:
logger.error(f"Error getting memory context: {e}")

task_prompt += "\nPlease provide only the final result of your work. Do not add any conversation or extra explanation."
# Build task prompt using DRY helper
task_prompt = _prepare_task_prompt(task, task_description, context_text)

if self.verbose >= 2:
logger.info(f"Executing task {task_id}: {task.description} using {executor_agent.display_name}")
logger.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")

if task.images:
# Use shared multimodal helper (DRY - defined at module level)
agent_output = executor_agent.chat(
get_multimodal_message(task_prompt, task.images),
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
task_name=task.name,
task_description=task.description,
task_id=task_id
)
else:
agent_output = executor_agent.chat(
task_prompt,
tools=tools,
output_json=task.output_json,
output_pydantic=task.output_pydantic,
stream=self.stream,
task_name=task.name,
task_description=task.description,
task_id=task_id
)
# Execute with agent using DRY helper
agent_output = _execute_with_agent_sync(
executor_agent, task_prompt, task, tools, stream=self.stream
)

if agent_output:
# Store the response in memory
Expand Down
Loading
Loading