-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
fix: resolve three architectural gaps (DRY, error protocol, streaming parity) #1297
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -13,6 +13,7 @@ | |
| from praisonaiagents._logging import get_logger | ||
| import asyncio | ||
| import threading | ||
| from ..errors import BudgetExceededError | ||
|
|
||
| # Fallback helpers to avoid circular imports | ||
| def _get_console(): | ||
|
|
@@ -666,7 +667,13 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r | |
| self._llm_call_count += 1 | ||
| if self._max_budget and self._total_cost >= self._max_budget: | ||
| if self._on_budget_exceeded == "stop": | ||
| raise BudgetExceededError(self.name, self._total_cost, self._max_budget) | ||
| raise BudgetExceededError( | ||
| f"Agent '{self.name}' exceeded budget: ${self._total_cost:.4f} >= ${self._max_budget:.4f}", | ||
| budget_type="cost", | ||
| limit=self._max_budget, | ||
| used=self._total_cost, | ||
| agent_id=self.name | ||
| ) | ||
|
Comment on lines
+670
to
+676
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain. 🏁 Script executed: #!/bin/bash
set -euo pipefail
FILE="src/praisonai-agents/praisonaiagents/agent/chat_mixin.py"
rg -n -C2 'BudgetExceededError|except Exception' "$FILE"Repository: MervinPraison/PraisonAI Length of output: 14466 🏁 Script executed: # Check if BudgetExceededError is defined anywhere in the codebase
rg -n "class BudgetExceededError" --type pyRepository: MervinPraison/PraisonAI Length of output: 160 🏁 Script executed: # Check imports at the top of chat_mixin.py
head -50 src/praisonai-agents/praisonaiagents/agent/chat_mixin.py | grep -n "import"Repository: MervinPraison/PraisonAI Length of output: 527 🏁 Script executed: # Read lines 660-710 for full context of the budget check and exception handling
sed -n '660,710p' src/praisonai-agents/praisonaiagents/agent/chat_mixin.py | cat -nRepository: MervinPraison/PraisonAI Length of output: 2739 🏁 Script executed: # Check what the test expects
rg -n "BudgetExceededError" src/praisonai-agents/tests/unit/test_token_budget.py -A 3 -B 3Repository: MervinPraison/PraisonAI Length of output: 1167 🏁 Script executed: # Read the BudgetExceededError class definition
sed -n '114,140p' src/praisonai-agents/praisonaiagents/errors.py | cat -nRepository: MervinPraison/PraisonAI Length of output: 990 🏁 Script executed: # Check if BudgetExceededError is re-exported from agent module
rg -n "BudgetExceededError" src/praisonai-agents/praisonaiagents/agent/__init__.py src/praisonai-agents/praisonaiagents/agent/agent.py 2>/dev/null | head -20Repository: MervinPraison/PraisonAI Length of output: 649 🏁 Script executed: # Check what exception handling wraps the budget check - full method context
sed -n '600,750p' src/praisonai-agents/praisonaiagents/agent/chat_mixin.py | cat -nRepository: MervinPraison/PraisonAI Length of output: 8856 Add missing import and fix budget-exceeded exception propagation.
Required fixes: (1) Add the import: +from ..errors import BudgetExceededError. (2) Fix the parameter: raise BudgetExceededError(
f"Agent '{self.name}' exceeded budget: ${self._total_cost:.4f} >= ${self._max_budget:.4f}",
- budget_type="tokens",
+ budget_type="cost",
limit=self._max_budget,
used=self._total_cost,
agent_id=self.name
)Add explicit re-raise in the generic handler at line 702: except BudgetExceededError:
raise
except Exception as e:
# existing overflow recovery logic🧰 Tools🪛 Ruff (0.15.9)[error] 669-669: Undefined name (F821) 🤖 Prompt for AI Agents |
||
| elif self._on_budget_exceeded == "warn": | ||
| logging.warning( | ||
| f"[budget] {self.name}: ${self._total_cost:.4f} exceeded " | ||
|
|
@@ -692,6 +699,8 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r | |
|
|
||
| return final_response | ||
|
|
||
| except BudgetExceededError: | ||
| raise | ||
| except Exception as e: | ||
| error_str = str(e).lower() | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -97,29 +97,13 @@ def from_dict(cls, data: Dict[str, Any]) -> 'HandoffConfig': | |
| data["context_policy"] = ContextPolicy(data["context_policy"]) | ||
| return cls(**{k: v for k, v in data.items() if k in cls.__dataclass_fields__}) | ||
|
|
||
| class HandoffError(Exception): | ||
| """Base exception for handoff errors.""" | ||
| pass | ||
|
|
||
| class HandoffCycleError(HandoffError): | ||
| """Raised when a cycle is detected in handoff chain.""" | ||
| def __init__(self, chain: List[str]): | ||
| self.chain = chain | ||
| super().__init__(f"Handoff cycle detected: {' -> '.join(chain)}") | ||
|
|
||
| class HandoffDepthError(HandoffError): | ||
| """Raised when max handoff depth is exceeded.""" | ||
| def __init__(self, depth: int, max_depth: int): | ||
| self.depth = depth | ||
| self.max_depth = max_depth | ||
| super().__init__(f"Max handoff depth exceeded: {depth} > {max_depth}") | ||
|
|
||
| class HandoffTimeoutError(HandoffError): | ||
| """Raised when handoff times out.""" | ||
| def __init__(self, timeout: float, agent_name: str): | ||
| self.timeout = timeout | ||
| self.agent_name = agent_name | ||
| super().__init__(f"Handoff to {agent_name} timed out after {timeout}s") | ||
| # Import structured error hierarchy from central errors module | ||
| from ..errors import ( | ||
| HandoffError, | ||
| HandoffCycleError, | ||
| HandoffDepthError, | ||
| HandoffTimeoutError | ||
| ) | ||
|
|
||
| # Thread-local storage for tracking handoff chains | ||
| _handoff_context = threading.local() | ||
|
|
@@ -267,12 +251,26 @@ def _check_safety(self, source_agent: 'Agent') -> None: | |
| if self.config.detect_cycles: | ||
| chain = _get_handoff_chain() | ||
| if target_name in chain: | ||
| raise HandoffCycleError(chain + [target_name]) | ||
| cycle_path = chain + [target_name] | ||
| raise HandoffCycleError( | ||
| f"Handoff cycle detected: {' -> '.join(cycle_path)}", | ||
| source_agent=source_agent.name if hasattr(source_agent, 'name') else 'unknown', | ||
| target_agent=target_name, | ||
| agent_id=source_agent.name if hasattr(source_agent, 'name') else 'unknown', | ||
| cycle_path=cycle_path | ||
|
Comment on lines
+255
to
+260
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Migrate the timeout branch to the same constructor contract. These two raises were updated to the centralized, keyword-based shape, but line 539 still calls `HandoffTimeoutError` with the old positional arguments. 🐛 Suggested fix: - raise HandoffTimeoutError(self.config.timeout_seconds, self.agent.name)
+ raise HandoffTimeoutError(
+ f"Handoff to {self.agent.name} timed out after {self.config.timeout_seconds}s",
+ timeout_seconds=self.config.timeout_seconds,
+ source_agent=source_agent.name if hasattr(source_agent, "name") else "unknown",
+ target_agent=self.agent.name,
+ agent_id=source_agent.name if hasattr(source_agent, "name") else "unknown",
+ )Also applies to: 266-273 🤖 Prompt for AI Agents |
||
| ) | ||
|
|
||
| # Check depth | ||
| current_depth = _get_handoff_depth() | ||
| if current_depth >= self.config.max_depth: | ||
| raise HandoffDepthError(current_depth + 1, self.config.max_depth) | ||
| raise HandoffDepthError( | ||
| f"Max handoff depth exceeded: {current_depth + 1} > {self.config.max_depth}", | ||
| source_agent=source_agent.name if hasattr(source_agent, 'name') else 'unknown', | ||
| target_agent=target_name, | ||
| agent_id=source_agent.name if hasattr(source_agent, 'name') else 'unknown', | ||
| max_depth=self.config.max_depth, | ||
| current_depth=current_depth + 1 | ||
| ) | ||
|
|
||
| def _prepare_context(self, source_agent: 'Agent', kwargs: Dict[str, Any]) -> HandoffInputData: | ||
| """ | ||
|
|
@@ -538,7 +536,13 @@ async def _execute(): | |
| handoff_depth=_get_handoff_depth(), | ||
| ) | ||
| self._execute_callback(self.config.on_error, source_agent, kwargs, result) | ||
| raise HandoffTimeoutError(self.config.timeout_seconds, self.agent.name) | ||
| raise HandoffTimeoutError( | ||
| f"Handoff to {self.agent.name} timed out after {self.config.timeout_seconds}s", | ||
| timeout_seconds=self.config.timeout_seconds, | ||
| source_agent=source_agent.name if hasattr(source_agent, "name") else "unknown", | ||
| target_agent=self.agent.name, | ||
| agent_id=source_agent.name if hasattr(source_agent, "name") else "unknown" | ||
| ) | ||
| except Exception as e: | ||
| result = HandoffResult( | ||
| success=False, | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -108,6 +108,93 @@ def get_multimodal_message(text_prompt: str, images: list) -> list: | |
| }) | ||
| return content | ||
|
|
||
| def _prepare_task_prompt(task, task_description, context_text=None): | ||
| """ | ||
| Prepare task prompt with context and memory (DRY helper). | ||
| """ | ||
| # Build task prompt - only use "User Input/Topic" format if there's actual content | ||
| if context_text and context_text.strip(): | ||
| task_prompt = f""" | ||
| User Input/Topic: {context_text} | ||
|
|
||
| Task: {task_description} | ||
| Expected Output: {task.expected_output} | ||
|
|
||
| IMPORTANT: Your response must be about the user's input/topic above. Incorporate it into your task.""" | ||
| else: | ||
| task_prompt = f""" | ||
| You need to do the following task: {task_description}. | ||
| Expected Output: {task.expected_output}.""" | ||
|
|
||
| # Add memory context if available | ||
| if task.memory: | ||
| try: | ||
| memory_context = task.memory.build_context_for_task(task.description) | ||
| if memory_context: | ||
|
Comment on lines
+129
to
+133
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Use the substituted description for memory lookup.
🔧 Suggested fix- memory_context = task.memory.build_context_for_task(task.description)
+ memory_context = task.memory.build_context_for_task(task_description)🤖 Prompt for AI Agents |
||
| # Log detailed memory context for debugging | ||
| logger.debug(f"Memory context for task '{task.description}': {memory_context}") | ||
| # Include actual memory content without verbose headers (essential for AI agent functionality) | ||
| task_prompt += f"\n\n{memory_context}" | ||
| except Exception as e: | ||
| logger.error(f"Error getting memory context: {e}") | ||
|
|
||
| task_prompt += "\nPlease provide only the final result of your work. Do not add any conversation or extra explanation." | ||
| return task_prompt | ||
|
|
||
| def _execute_with_agent_sync(executor_agent, task_prompt, task, tools, stream=None): | ||
| """ | ||
| Execute task with agent synchronously (DRY helper). | ||
| """ | ||
| if task.images: | ||
| return executor_agent.chat( | ||
| get_multimodal_message(task_prompt, task.images), | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| stream=stream, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task.id | ||
| ) | ||
| else: | ||
| return executor_agent.chat( | ||
| task_prompt, | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| stream=stream, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task.id | ||
| ) | ||
|
|
||
| async def _execute_with_agent_async(executor_agent, task_prompt, task, tools, stream=None): | ||
| """ | ||
| Execute task with agent asynchronously (DRY helper). | ||
| """ | ||
| if task.images: | ||
| return await executor_agent.achat( | ||
| get_multimodal_message(task_prompt, task.images), | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| stream=stream, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task.id | ||
| ) | ||
| else: | ||
| return await executor_agent.achat( | ||
| task_prompt, | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| stream=stream, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task.id | ||
| ) | ||
|
|
||
| def process_task_context(context_item, verbose=0, user_id=None): | ||
| """ | ||
| Process a single context item for task execution. | ||
|
|
@@ -703,59 +790,17 @@ async def aexecute_task(self, task_id): | |
| context_separator = '\n\n' | ||
| context_text = context_separator.join(unique_contexts) | ||
|
|
||
| # Build task prompt - only use "User Input/Topic" format if there's actual content | ||
| if context_text and context_text.strip(): | ||
| task_prompt = f""" | ||
| User Input/Topic: {context_text} | ||
|
|
||
| Task: {task_description} | ||
| Expected Output: {task.expected_output} | ||
|
|
||
| IMPORTANT: Your response must be about the user's input/topic above. Incorporate it into your task.""" | ||
| else: | ||
| task_prompt = f""" | ||
| You need to do the following task: {task_description}. | ||
| Expected Output: {task.expected_output}.""" | ||
|
|
||
| # Add memory context if available | ||
| if task.memory: | ||
| try: | ||
| memory_context = task.memory.build_context_for_task(task.description) | ||
| if memory_context: | ||
| # Log detailed memory context for debugging | ||
| logger.debug(f"Memory context for task '{task.description}': {memory_context}") | ||
| # Include actual memory content without verbose headers (essential for AI agent functionality) | ||
| task_prompt += f"\n\n{memory_context}" | ||
| except Exception as e: | ||
| logger.error(f"Error getting memory context: {e}") | ||
|
|
||
| task_prompt += "\nPlease provide only the final result of your work. Do not add any conversation or extra explanation." | ||
| # Build task prompt using DRY helper | ||
| task_prompt = _prepare_task_prompt(task, task_description, context_text) | ||
|
|
||
| if self.verbose >= 2: | ||
| logger.info(f"Executing task {task_id}: {task_description} using {executor_agent.display_name}") | ||
| logger.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}") | ||
|
|
||
| if task.images: | ||
| # Use shared multimodal helper (DRY - defined at module level) | ||
| agent_output = await executor_agent.achat( | ||
| get_multimodal_message(task_prompt, task.images), | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task.id | ||
| ) | ||
| else: | ||
| agent_output = await executor_agent.achat( | ||
| task_prompt, | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task.id | ||
| ) | ||
| # Execute with agent using DRY helper (fixes missing stream parameter) | ||
| agent_output = await _execute_with_agent_async( | ||
| executor_agent, task_prompt, task, tools, stream=self.stream | ||
| ) | ||
|
Comment on lines
+800
to
+803
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Don’t override agent-level streaming with a team-wide default. These calls now always pass `stream=self.stream`, overriding any per-agent setting. Also applies to lines 1121-1124. 🤖 Prompt for AI Agents |
||
|
|
||
| if agent_output: | ||
| # Store the response in memory | ||
|
|
@@ -1066,60 +1111,17 @@ def execute_task(self, task_id): | |
| context_separator = '\n\n' | ||
| context_text = context_separator.join(unique_contexts) | ||
|
|
||
| # Build task prompt - only use "User Input/Topic" format if there's actual content | ||
| if context_text and context_text.strip(): | ||
| task_prompt = f""" | ||
| User Input/Topic: {context_text} | ||
|
|
||
| Task: {task_description} | ||
| Expected Output: {task.expected_output} | ||
|
|
||
| IMPORTANT: Your response must be about the user's input/topic above. Incorporate it into your task.""" | ||
| else: | ||
| task_prompt = f""" | ||
| You need to do the following task: {task_description}. | ||
| Expected Output: {task.expected_output}.""" | ||
|
|
||
| # Add memory context if available | ||
| if task.memory: | ||
| try: | ||
| memory_context = task.memory.build_context_for_task(task.description) | ||
| if memory_context: | ||
| # Log detailed memory context for debugging | ||
| logger.debug(f"Memory context for task '{task.description}': {memory_context}") | ||
| # Include actual memory content without verbose headers (essential for AI agent functionality) | ||
| task_prompt += f"\n\n{memory_context}" | ||
| except Exception as e: | ||
| logger.error(f"Error getting memory context: {e}") | ||
|
|
||
| task_prompt += "\nPlease provide only the final result of your work. Do not add any conversation or extra explanation." | ||
| # Build task prompt using DRY helper | ||
| task_prompt = _prepare_task_prompt(task, task_description, context_text) | ||
|
|
||
| if self.verbose >= 2: | ||
| logger.info(f"Executing task {task_id}: {task.description} using {executor_agent.display_name}") | ||
| logger.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}") | ||
|
|
||
| if task.images: | ||
| # Use shared multimodal helper (DRY - defined at module level) | ||
| agent_output = executor_agent.chat( | ||
| get_multimodal_message(task_prompt, task.images), | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task_id | ||
| ) | ||
| else: | ||
| agent_output = executor_agent.chat( | ||
| task_prompt, | ||
| tools=tools, | ||
| output_json=task.output_json, | ||
| output_pydantic=task.output_pydantic, | ||
| stream=self.stream, | ||
| task_name=task.name, | ||
| task_description=task.description, | ||
| task_id=task_id | ||
| ) | ||
| # Execute with agent using DRY helper | ||
| agent_output = _execute_with_agent_sync( | ||
| executor_agent, task_prompt, task, tools, stream=self.stream | ||
| ) | ||
|
|
||
| if agent_output: | ||
| # Store the response in memory | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.