From 2c4af80871bbb1ddf8b9f95a21d1268040bdf048 Mon Sep 17 00:00:00 2001 From: MervinPraison Date: Fri, 6 Jun 2025 17:52:43 +0100 Subject: [PATCH 1/3] Enhance telemetry support and update dependencies - Added 'posthog>=4.0.0' to dependencies and optional 'telemetry' section in pyproject.toml. - Updated blog_agent to use llm_config for improved clarity. - Disabled memory in PraisonAIAgents instantiation for better control. - Introduced telemetry support with lazy loading and fallback functions in __init__.py. This update improves the integration of telemetry features while maintaining existing functionality. --- src/praisonai-agents/TELEMETRY_SUMMARY.md | 33 ++ src/praisonai-agents/debug_auto_instrument.py | 44 ++ src/praisonai-agents/debug_telemetry.py | 69 +++ .../debug_telemetry_double.py | 69 +++ .../praisonaiagents/__init__.py | 48 ++- .../praisonaiagents/telemetry/README.md | 108 +++++ .../praisonaiagents/telemetry/__init__.py | 102 +++++ .../praisonaiagents/telemetry/integration.py | 242 +++++++++++ .../praisonaiagents/telemetry/telemetry.py | 350 +++++++++++++++ src/praisonai-agents/pyproject.toml | 11 +- src/praisonai-agents/telemetry_analysis.md | 123 ++++++ src/praisonai-agents/telemetry_example.py | 86 ++++ src/praisonai-agents/telemetry_minimal.py | 53 +++ src/praisonai-agents/test.py | 4 +- .../test_auto_telemetry_final.py | 40 ++ src/praisonai-agents/test_import_order.py | 38 ++ .../test_manual_instrumentation.py | 54 +++ src/praisonai-agents/test_posthog.py | 64 +++ src/praisonai-agents/test_posthog_detailed.py | 139 ++++++ src/praisonai-agents/test_posthog_direct.py | 43 ++ src/praisonai-agents/test_posthog_error.py | 20 + src/praisonai-agents/test_posthog_import.py | 50 +++ .../test_telemetry_automatic.py | 47 +++ .../test_telemetry_integration.py | 126 ++++++ src/praisonai-agents/test_telemetry_simple.py | 29 ++ .../tests/state_based_workflow_example.py | 294 +++++++++++++ .../tests/state_management_example.py | 259 ++++++++++++ 
.../tests/state_with_memory_example.py | 397 ++++++++++++++++++ .../tests/telemetry_example.py | 258 ++++++++++++ 29 files changed, 3195 insertions(+), 5 deletions(-) create mode 100644 src/praisonai-agents/TELEMETRY_SUMMARY.md create mode 100644 src/praisonai-agents/debug_auto_instrument.py create mode 100644 src/praisonai-agents/debug_telemetry.py create mode 100644 src/praisonai-agents/debug_telemetry_double.py create mode 100644 src/praisonai-agents/praisonaiagents/telemetry/README.md create mode 100644 src/praisonai-agents/praisonaiagents/telemetry/__init__.py create mode 100644 src/praisonai-agents/praisonaiagents/telemetry/integration.py create mode 100644 src/praisonai-agents/praisonaiagents/telemetry/telemetry.py create mode 100644 src/praisonai-agents/telemetry_analysis.md create mode 100644 src/praisonai-agents/telemetry_example.py create mode 100644 src/praisonai-agents/telemetry_minimal.py create mode 100644 src/praisonai-agents/test_auto_telemetry_final.py create mode 100644 src/praisonai-agents/test_import_order.py create mode 100644 src/praisonai-agents/test_manual_instrumentation.py create mode 100644 src/praisonai-agents/test_posthog.py create mode 100644 src/praisonai-agents/test_posthog_detailed.py create mode 100644 src/praisonai-agents/test_posthog_direct.py create mode 100644 src/praisonai-agents/test_posthog_error.py create mode 100644 src/praisonai-agents/test_posthog_import.py create mode 100644 src/praisonai-agents/test_telemetry_automatic.py create mode 100644 src/praisonai-agents/test_telemetry_integration.py create mode 100644 src/praisonai-agents/test_telemetry_simple.py create mode 100644 src/praisonai-agents/tests/state_based_workflow_example.py create mode 100644 src/praisonai-agents/tests/state_management_example.py create mode 100644 src/praisonai-agents/tests/state_with_memory_example.py create mode 100644 src/praisonai-agents/tests/telemetry_example.py diff --git a/src/praisonai-agents/TELEMETRY_SUMMARY.md 
b/src/praisonai-agents/TELEMETRY_SUMMARY.md new file mode 100644 index 000000000..8bfd1b441 --- /dev/null +++ b/src/praisonai-agents/TELEMETRY_SUMMARY.md @@ -0,0 +1,33 @@ +# Telemetry Implementation Summary + +## What Was Fixed + +1. **PostHog initialization error** - Removed invalid `events_to_ignore` parameter +2. **Missing imports** - Added `MinimalTelemetry` and `TelemetryCollector` imports to telemetry `__init__.py` +3. **Wrong method instrumentation** - Changed from `agent.execute()` to `agent.chat()` +4. **Task tracking** - Added instrumentation for `workflow.execute_task()` +5. **Automatic setup** - Added `auto_instrument_all()` in main `__init__.py` +6. **Automatic flush** - Added `atexit` handler to send data on program exit + +## Current Status + +✅ **Telemetry is now working automatically!** + +- Enabled by default (opt-out via environment variables) +- Tracks agent executions and task completions +- Sends anonymous data to PostHog on program exit +- No manual setup required + +## Metrics Example +``` +Telemetry metrics collected: +- Agent executions: 4 +- Task completions: 2 +- Errors: 0 +- Session ID: 33873e62396d8b4c +``` + +## To Disable +Set any of these environment variables: +- `PRAISONAI_TELEMETRY_DISABLED=true` +- `DO_NOT_TRACK=true` \ No newline at end of file diff --git a/src/praisonai-agents/debug_auto_instrument.py b/src/praisonai-agents/debug_auto_instrument.py new file mode 100644 index 000000000..c565a1444 --- /dev/null +++ b/src/praisonai-agents/debug_auto_instrument.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +""" +Debug auto-instrumentation. +""" + +print("1. Import telemetry module...") +import praisonaiagents.telemetry +print(" Telemetry module imported") + +print("\n2. Check if auto_instrument_all was called...") +print(f" _initialized: {praisonaiagents.telemetry._initialized}") + +print("\n3. Import Agent and PraisonAIAgents...") +from praisonaiagents import Agent, PraisonAIAgents +print(" Classes imported") + +print("\n4. 
Check if classes are instrumented...") +agent = Agent(name="Test", role="Test", goal="Test", instructions="Test") +print(f" Agent.__init__ name: {Agent.__init__.__name__}") +print(f" agent.execute exists: {hasattr(agent, 'execute')}") + +print("\n5. Manually call auto_instrument_all...") +from praisonaiagents.telemetry.integration import auto_instrument_all +auto_instrument_all() +print(" auto_instrument_all() called") + +print("\n6. Create new agent after instrumentation...") +agent2 = Agent(name="Test2", role="Test2", goal="Test2", instructions="Test2") +print(f" Agent.__init__ name after: {Agent.__init__.__name__}") +print(f" agent2.execute exists: {hasattr(agent2, 'execute')}") + +print("\n7. Check if execute is wrapped...") +if hasattr(agent2, 'execute'): + print(f" agent2.execute name: {agent2.execute.__name__}") + print(f" agent2.execute wrapped: {hasattr(agent2.execute, '__wrapped__')}") + +print("\n8. Import telemetry and check if it's working...") +from praisonaiagents.telemetry import get_telemetry +telemetry = get_telemetry() +print(f" Telemetry enabled: {telemetry.enabled}") +print(f" PostHog available: {telemetry._posthog is not None}") + +# The key insight: auto_instrument_all needs to be called AFTER +# the Agent and PraisonAIAgents classes are imported! \ No newline at end of file diff --git a/src/praisonai-agents/debug_telemetry.py b/src/praisonai-agents/debug_telemetry.py new file mode 100644 index 000000000..7a063b48f --- /dev/null +++ b/src/praisonai-agents/debug_telemetry.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +""" +Debug telemetry instrumentation to see what's happening. +""" + +import os +# Make sure telemetry is enabled +if 'PRAISONAI_TELEMETRY_DISABLED' in os.environ: + del os.environ['PRAISONAI_TELEMETRY_DISABLED'] + +print("1. Importing modules...") +from praisonaiagents import Agent, Task, PraisonAIAgents +from praisonaiagents.telemetry import get_telemetry + +print("\n2. 
Checking telemetry status...") +telemetry = get_telemetry() +print(f"Telemetry enabled: {telemetry.enabled}") +print(f"PostHog available: {telemetry._posthog is not None}") + +print("\n3. Creating agent...") +agent = Agent( + name="TestAgent", + role="Test Role", + goal="Test Goal", + instructions="Test instructions" +) + +# Check if agent.execute is instrumented +print(f"\n4. Checking agent instrumentation...") +print(f"Agent has execute method: {hasattr(agent, 'execute')}") +if hasattr(agent, 'execute'): + print(f"Execute method type: {type(agent.execute)}") + print(f"Execute method name: {agent.execute.__name__ if hasattr(agent.execute, '__name__') else 'No name'}") + print(f"Is wrapped: {'instrumented' in str(agent.execute.__name__) if hasattr(agent.execute, '__name__') else 'Unknown'}") + +print("\n5. Creating task...") +task = Task( + description="Test task", + expected_output="Test output", + agent=agent +) + +print("\n6. Creating workflow...") +workflow = PraisonAIAgents( + agents=[agent], + tasks=[task], + process="sequential" +) + +# Check if workflow.start is instrumented +print(f"\n7. Checking workflow instrumentation...") +print(f"Workflow has start method: {hasattr(workflow, 'start')}") +if hasattr(workflow, 'start'): + print(f"Start method type: {type(workflow.start)}") + print(f"Start method name: {workflow.start.__name__ if hasattr(workflow.start, '__name__') else 'No name'}") + print(f"Is wrapped: {'instrumented' in str(workflow.start.__name__) if hasattr(workflow.start, '__name__') else 'Unknown'}") + +print("\n8. Running workflow...") +result = workflow.start() + +print("\n9. Checking metrics...") +metrics = telemetry.get_metrics() +print(f"Metrics: {metrics}") + +print("\n10. 
Manually tracking to verify telemetry works...") +telemetry.track_agent_execution("ManualTest", success=True) +telemetry.track_task_completion("ManualTask", success=True) +manual_metrics = telemetry.get_metrics() +print(f"After manual tracking: {manual_metrics['metrics']}") \ No newline at end of file diff --git a/src/praisonai-agents/debug_telemetry_double.py b/src/praisonai-agents/debug_telemetry_double.py new file mode 100644 index 000000000..e15b66d83 --- /dev/null +++ b/src/praisonai-agents/debug_telemetry_double.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +""" +Debug double-counting in telemetry. +""" + +from praisonaiagents import Agent, Task, PraisonAIAgents +from praisonaiagents.telemetry import get_telemetry + +# Get telemetry instance +telemetry = get_telemetry() + +# Clear any existing metrics by flushing +telemetry.flush() + +print("Starting fresh telemetry tracking...\n") + +# Create ONE agent +print("Creating 1 agent...") +agent = Agent( + name="SingleAgent", + role="Test Role", + goal="Test Goal", + instructions="Test instructions" +) + +# Create ONE task +print("Creating 1 task...") +task = Task( + description="Single test task", + expected_output="Test output", + agent=agent +) + +# Create workflow with ONE agent and ONE task +print("Creating workflow with 1 agent and 1 task...") +workflow = PraisonAIAgents( + agents=[agent], + tasks=[task], + process="sequential" +) + +# Check metrics before running +metrics_before = telemetry.get_metrics() +print(f"\nMetrics BEFORE running workflow:") +print(f" Agent executions: {metrics_before['metrics']['agent_executions']}") +print(f" Task completions: {metrics_before['metrics']['task_completions']}") + +# Run the workflow +print("\nRunning workflow...") +result = workflow.start() + +# Check metrics after running +metrics_after = telemetry.get_metrics() +print(f"\nMetrics AFTER running workflow:") +print(f" Agent executions: {metrics_after['metrics']['agent_executions']} (expected: 1)") +print(f" Task 
completions: {metrics_after['metrics']['task_completions']} (expected: 1)") + +if metrics_after['metrics']['agent_executions'] > 1: + print("\n❌ ISSUE: Agent executions are being double-counted!") + print(" Possible causes:") + print(" - Agent method is being called multiple times") + print(" - Instrumentation is being applied twice") + print(" - Multiple tracking calls for same execution") + +if metrics_after['metrics']['task_completions'] > 1: + print("\n❌ ISSUE: Task completions are being double-counted!") + print(" Possible causes:") + print(" - Task completion is tracked in multiple places") + print(" - Instrumentation is being applied twice") \ No newline at end of file diff --git a/src/praisonai-agents/praisonaiagents/__init__.py b/src/praisonai-agents/praisonaiagents/__init__.py index 7a6036e6d..6ef4a6833 100644 --- a/src/praisonai-agents/praisonaiagents/__init__.py +++ b/src/praisonai-agents/praisonaiagents/__init__.py @@ -30,9 +30,50 @@ async_display_callbacks, ) +# Telemetry support (lazy loaded) +try: + from .telemetry import ( + get_telemetry, + enable_telemetry, + disable_telemetry, + MinimalTelemetry, + TelemetryCollector + ) + _telemetry_available = True +except ImportError: + # Telemetry not available - provide stub functions + _telemetry_available = False + def get_telemetry(): + return None + + def enable_telemetry(*args, **kwargs): + import logging + logging.warning( + "Telemetry not available. 
Install with: pip install praisonaiagents[telemetry]" + ) + return None + + def disable_telemetry(): + pass + + MinimalTelemetry = None + TelemetryCollector = None + # Add Agents as an alias for PraisonAIAgents Agents = PraisonAIAgents +# Apply telemetry auto-instrumentation after all imports +if _telemetry_available: + try: + # Only instrument if telemetry is enabled + _telemetry = get_telemetry() + if _telemetry and _telemetry.enabled: + from .telemetry.integration import auto_instrument_all + auto_instrument_all(_telemetry) + except Exception: + # Silently fail if there are any issues + pass + __all__ = [ 'Agent', 'ImageAgent', @@ -60,5 +101,10 @@ 'Chunking', 'MCP', 'GuardrailResult', - 'LLMGuardrail' + 'LLMGuardrail', + 'get_telemetry', + 'enable_telemetry', + 'disable_telemetry', + 'MinimalTelemetry', + 'TelemetryCollector' ] \ No newline at end of file diff --git a/src/praisonai-agents/praisonaiagents/telemetry/README.md b/src/praisonai-agents/praisonaiagents/telemetry/README.md new file mode 100644 index 000000000..56301265e --- /dev/null +++ b/src/praisonai-agents/praisonaiagents/telemetry/README.md @@ -0,0 +1,108 @@ +# PraisonAI Agents Telemetry + +This module provides minimal, privacy-focused telemetry for PraisonAI Agents. + +## Privacy Guarantees + +- **No personal data is collected** - No prompts, responses, or user content +- **Anonymous metrics only** - Usage counts and feature adoption +- **Opt-out by default** - Respects standard privacy preferences +- **Transparent collection** - See exactly what's tracked below + +## What We Collect + +We collect only anonymous usage metrics: +- Number of agent executions +- Number of task completions +- Tool usage (names only, no arguments) +- Error types (no error messages) +- Framework version and OS type +- Anonymous session ID (regenerated each run) + +## Disabling Telemetry + +Telemetry can be disabled in three ways: + +### 1. 
Environment Variables (Recommended) + +Set any of these environment variables: +```bash +export PRAISONAI_TELEMETRY_DISABLED=true +export PRAISONAI_DISABLE_TELEMETRY=true +export DO_NOT_TRACK=true # Universal standard +``` + +### 2. Programmatically + +```python +from praisonaiagents.telemetry import disable_telemetry +disable_telemetry() +``` + +### 3. At Runtime + +```python +from praisonaiagents.telemetry import get_telemetry +telemetry = get_telemetry() +telemetry.enabled = False +``` + +## Usage + +The telemetry module integrates automatically with PraisonAI Agents: + +```python +from praisonaiagents import Agent, Task, PraisonAIAgents + +# Telemetry is automatically enabled (unless disabled by environment) +agent = Agent(name="MyAgent", role="Assistant") +task = Task(description="Help user", agent=agent) + +workflow = PraisonAIAgents(agents=[agent], tasks=[task]) +result = workflow.start() + +# Check telemetry metrics +from praisonaiagents.telemetry import get_telemetry +telemetry = get_telemetry() +print(telemetry.get_metrics()) +``` + +## Implementation Details + +The telemetry implementation is minimal and lightweight: +- No external dependencies required +- No network calls in current implementation +- Metrics stored in memory only +- Future versions may send to PostHog or similar privacy-focused services + +## Backward Compatibility + +The module maintains compatibility with the previous telemetry interface: + +```python +from praisonaiagents.telemetry import TelemetryCollector + +collector = TelemetryCollector() +collector.start() + +with collector.trace_agent_execution("MyAgent"): + # Agent execution code + pass + +collector.stop() +``` + +## Contributing + +When contributing to telemetry: +1. Never collect personal data or user content +2. Always make new metrics opt-out +3. Document what's collected +4. 
"""
PraisonAI Agents Minimal Telemetry Module

This module provides anonymous usage tracking with privacy-first design.
Telemetry is opt-out and can be disabled via environment variables:
- PRAISONAI_TELEMETRY_DISABLED=true
- PRAISONAI_DISABLE_TELEMETRY=true
- DO_NOT_TRACK=true

No personal data, prompts, or responses are collected.
"""

import os
import atexit
from typing import Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from .telemetry import MinimalTelemetry, TelemetryCollector

# Import the classes for real (not just type checking)
from .telemetry import MinimalTelemetry, TelemetryCollector

__all__ = [
    'get_telemetry',
    'enable_telemetry',
    'disable_telemetry',
    'MinimalTelemetry',
    'TelemetryCollector',  # For backward compatibility
]


def get_telemetry() -> 'MinimalTelemetry':
    """Get the global telemetry instance."""
    from .telemetry import get_telemetry as _get_telemetry
    return _get_telemetry()


def enable_telemetry():
    """Enable telemetry (if not disabled by environment)."""
    from .telemetry import enable_telemetry as _enable_telemetry
    _enable_telemetry()


def disable_telemetry():
    """Disable telemetry."""
    from .telemetry import disable_telemetry as _disable_telemetry
    _disable_telemetry()


# Module-level guards so each piece of setup happens at most once per process.
_initialized = False
_atexit_registered = False

# Opt-out environment variables honoured by this package; DO_NOT_TRACK is the
# cross-tool standard, the other two are PraisonAI-specific.
_OPT_OUT_VARS = (
    'PRAISONAI_TELEMETRY_DISABLED',
    'PRAISONAI_DISABLE_TELEMETRY',
    'DO_NOT_TRACK',
)


def _telemetry_disabled_by_env() -> bool:
    """Return True when any standard opt-out environment variable is set."""
    return any(
        os.environ.get(var, '').lower() in ('true', '1', 'yes')
        for var in _OPT_OUT_VARS
    )


def _ensure_atexit():
    """Register an atexit handler (at most once) that flushes telemetry on exit."""
    global _atexit_registered
    if _atexit_registered:
        return

    if not _telemetry_disabled_by_env():
        # Flush pending telemetry data when the interpreter shuts down.
        atexit.register(lambda: get_telemetry().flush())
        _atexit_registered = True


def _initialize_telemetry():
    """Initialize telemetry with auto-instrumentation and cleanup."""
    global _initialized
    if _initialized:
        return

    # Ensure the exit-time flush handler is in place.
    _ensure_atexit()

    if not _telemetry_disabled_by_env():
        # Actual instrumentation is deferred to the package __init__ to avoid
        # circular imports; this flag only records that setup has run.
        _initialized = True


# No need for lazy auto-instrumentation here since main __init__.py handles it


# Register the exit handler as soon as the module is imported.
_ensure_atexit()
"""
Simplified integration module for adding telemetry to core PraisonAI components.
"""

from typing import Any, Optional, TYPE_CHECKING
from functools import wraps
import time

if TYPE_CHECKING:
    from .telemetry import MinimalTelemetry
    from ..agent.agent import Agent
    from ..task.task import Task
    from ..agents.agents import PraisonAIAgents


def _wrap_agent_method(agent: 'Agent', method_name: str, telemetry: 'MinimalTelemetry'):
    """Wrap one agent entry point so every call is counted as an agent execution.

    Missing methods are skipped silently, so the same code path handles
    chat/start/run regardless of which of them the Agent actually defines.
    """
    original = getattr(agent, method_name, None)
    if not original:
        return

    @wraps(original)
    def instrumented(*args, **kwargs):
        try:
            result = original(*args, **kwargs)
            telemetry.track_agent_execution(agent.name, success=True)
            return result
        except Exception as e:
            telemetry.track_agent_execution(agent.name, success=False)
            # Only the exception type is recorded, never the message.
            telemetry.track_error(type(e).__name__)
            raise

    setattr(agent, method_name, instrumented)


def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = None):
    """
    Instrument an Agent instance with minimal telemetry.

    Args:
        agent: The Agent instance to instrument
        telemetry: Optional telemetry instance (uses global if not provided)

    Returns:
        The same agent instance (methods wrapped in place when enabled).
    """
    if not telemetry:
        from .telemetry import get_telemetry
        telemetry = get_telemetry()

    if not telemetry.enabled:
        return agent

    # Check if agent is already instrumented to avoid double-counting
    if hasattr(agent, '_telemetry_instrumented'):
        return agent

    # chat is the main method called by workflows; start/run cover direct use.
    for method_name in ('chat', 'start', 'run'):
        _wrap_agent_method(agent, method_name, telemetry)

    # Tool calls are tracked separately (tool name only, no args or results).
    original_execute_tool = getattr(agent, 'execute_tool', None)
    if original_execute_tool:
        @wraps(original_execute_tool)
        def instrumented_execute_tool(tool_name: str, *args, **kwargs):
            try:
                result = original_execute_tool(tool_name, *args, **kwargs)
                telemetry.track_tool_usage(tool_name, success=True)
                return result
            except Exception as e:
                telemetry.track_tool_usage(tool_name, success=False)
                telemetry.track_error(type(e).__name__)
                raise

        agent.execute_tool = instrumented_execute_tool

    # Mark agent as instrumented to avoid double instrumentation
    agent._telemetry_instrumented = True

    return agent


def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['MinimalTelemetry'] = None):
    """
    Instrument a PraisonAIAgents workflow with minimal telemetry.

    Args:
        workflow: The PraisonAIAgents instance to instrument
        telemetry: Optional telemetry instance (uses global if not provided)

    Returns:
        The same workflow instance.
    """
    if not telemetry:
        from .telemetry import get_telemetry
        telemetry = get_telemetry()

    if not telemetry.enabled:
        return workflow

    # Check if workflow is already instrumented to avoid double-counting
    if hasattr(workflow, '_telemetry_instrumented'):
        return workflow

    # Track feature usage (process name only, e.g. "workflow_sequential")
    telemetry.track_feature_usage(
        f"workflow_{workflow.process}" if hasattr(workflow, 'process') else "workflow"
    )

    # Instrument all agents in the workflow
    if hasattr(workflow, 'agents') and workflow.agents:
        for agent in workflow.agents:
            instrument_agent(agent, telemetry)

    # Wrap execute_task to track task completions
    if hasattr(workflow, 'execute_task'):
        original_execute_task = workflow.execute_task

        @wraps(original_execute_task)
        def instrumented_execute_task(task_id, *args, **kwargs):
            task = None
            try:
                # Best-effort lookup for a friendlier task name.
                # NOTE(review): assumes tasks is an int-indexable sequence and
                # task_id an int — confirm against PraisonAIAgents internals.
                if hasattr(workflow, 'tasks') and task_id < len(workflow.tasks):
                    task = workflow.tasks[task_id]

                result = original_execute_task(task_id, *args, **kwargs)

                task_name = task.name if task and hasattr(task, 'name') else f"task_{task_id}"
                telemetry.track_task_completion(task_name, success=True)

                return result
            except Exception as e:
                telemetry.track_error(type(e).__name__)
                if task:
                    task_name = task.name if hasattr(task, 'name') else f"task_{task_id}"
                    telemetry.track_task_completion(task_name, success=False)
                raise

        workflow.execute_task = instrumented_execute_task

    # Wrap start; agent.chat already counts executions, so only errors are
    # tracked here to avoid double-counting.
    original_start = workflow.start

    @wraps(original_start)
    def instrumented_start(*args, **kwargs):
        try:
            return original_start(*args, **kwargs)
        except Exception as e:
            telemetry.track_error(type(e).__name__)
            raise

    workflow.start = instrumented_start

    # Same treatment for the async variant, when present.
    if hasattr(workflow, 'astart'):
        original_astart = workflow.astart

        @wraps(original_astart)
        async def instrumented_astart(*args, **kwargs):
            try:
                return await original_astart(*args, **kwargs)
            except Exception as e:
                telemetry.track_error(type(e).__name__)
                raise

        workflow.astart = instrumented_astart

    # Mark workflow as instrumented to avoid double instrumentation
    workflow._telemetry_instrumented = True

    return workflow


# Auto-instrumentation helper
def auto_instrument_all(telemetry: Optional['MinimalTelemetry'] = None):
    """
    Automatically instrument all new instances of Agent and PraisonAIAgents.
    This should be called after enabling telemetry.

    Args:
        telemetry: Optional telemetry instance (uses global if not provided)
    """
    if not telemetry:
        from .telemetry import get_telemetry
        telemetry = get_telemetry()

    if not telemetry.enabled:
        return

    try:
        # Import the classes
        from ..agent.agent import Agent
        from ..agents.agents import PraisonAIAgents

        # Store original __init__ methods
        original_agent_init = Agent.__init__
        original_workflow_init = PraisonAIAgents.__init__

        # Instrument each instance right after its normal construction; the
        # per-instance _telemetry_instrumented flag keeps this idempotent even
        # if auto_instrument_all is called more than once.
        @wraps(original_agent_init)
        def agent_init_wrapper(self, *args, **kwargs):
            original_agent_init(self, *args, **kwargs)
            instrument_agent(self, telemetry)

        @wraps(original_workflow_init)
        def workflow_init_wrapper(self, *args, **kwargs):
            original_workflow_init(self, *args, **kwargs)
            instrument_workflow(self, telemetry)

        # Apply wrapped constructors
        Agent.__init__ = agent_init_wrapper
        PraisonAIAgents.__init__ = workflow_init_wrapper

    except ImportError:
        # Core classes not importable here; skip auto-instrumentation.
        pass
"""
Minimal telemetry implementation for PraisonAI Agents.

This module provides anonymous usage tracking with privacy-first design.
All telemetry is opt-out via environment variables.
"""

import os
import time
import platform
import hashlib
from typing import Dict, Any, Optional
from datetime import datetime
import logging

# PostHog is an optional dependency (praisonaiagents[telemetry]); without it,
# metrics are only kept in memory and logged at debug level.
try:
    from posthog import Posthog
    POSTHOG_AVAILABLE = True
except ImportError:
    POSTHOG_AVAILABLE = False

# Check for opt-out environment variables (DO_NOT_TRACK is the universal one).
_TELEMETRY_DISABLED = any([
    os.environ.get('PRAISONAI_TELEMETRY_DISABLED', '').lower() in ('true', '1', 'yes'),
    os.environ.get('PRAISONAI_DISABLE_TELEMETRY', '').lower() in ('true', '1', 'yes'),
    os.environ.get('DO_NOT_TRACK', '').lower() in ('true', '1', 'yes'),
])


class MinimalTelemetry:
    """
    Minimal telemetry collector for anonymous usage tracking.

    Privacy guarantees:
    - No personal data is collected
    - No prompts, responses, or user content is tracked
    - Only anonymous metrics about feature usage
    - Respects DO_NOT_TRACK standard
    - Can be disabled via environment variables
    """

    def __init__(self, enabled: bool = None):
        """
        Initialize the minimal telemetry collector.

        Args:
            enabled: Override the environment-based enable/disable setting
        """
        # Respect explicit enabled parameter, otherwise check environment
        if enabled is not None:
            self.enabled = enabled
        else:
            self.enabled = not _TELEMETRY_DISABLED

        self.logger = logging.getLogger(__name__)

        # Always initialize state, even when disabled, so that flipping
        # `self.enabled` to True later (see enable_telemetry) cannot hit
        # missing attributes in get_metrics()/flush().
        # Anonymous per-run session ID (not a user ID; regenerated each run).
        session_data = f"{datetime.now().isoformat()}-{os.getpid()}-{time.time()}"
        self.session_id = hashlib.sha256(session_data.encode()).hexdigest()[:16]

        # Basic metrics storage
        self._metrics = {
            "agent_executions": 0,
            "task_completions": 0,
            "tool_calls": 0,
            "errors": 0,
        }

        # Collect basic environment info (anonymous)
        self._environment = {
            "python_version": platform.python_version(),
            "os_type": platform.system(),
            "framework_version": self._get_framework_version(),
        }

        self._posthog = None

        if not self.enabled:
            self.logger.debug("Telemetry is disabled")
            return

        self.logger.debug(f"Telemetry enabled with session {self.session_id}")

        # Initialize the PostHog client only when telemetry is active.
        if POSTHOG_AVAILABLE:
            try:
                self._posthog = Posthog(
                    project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7',
                    host='https://eu.i.posthog.com',
                    disable_geoip=True
                )
            except Exception as e:
                # Telemetry must never break the host application.
                self.logger.debug(f"PostHog initialization failed: {e}")
                self._posthog = None

    def _get_framework_version(self) -> str:
        """Get the PraisonAI Agents version, or "unknown" outside the package."""
        try:
            from .. import __version__
            return __version__
        except ImportError:
            return "unknown"

    def track_agent_execution(self, agent_name: str = None, success: bool = True):
        """
        Track an agent execution event.

        Args:
            agent_name: Name of the agent (not logged, just for counting)
            success: Whether the execution was successful
        """
        if not self.enabled:
            return

        self._metrics["agent_executions"] += 1

        # Counters are aggregated locally and only sent on flush().
        self.logger.debug(f"Agent execution tracked: success={success}")

    def track_task_completion(self, task_name: str = None, success: bool = True):
        """
        Track a task completion event.

        Args:
            task_name: Name of the task (not logged, just for counting)
            success: Whether the task completed successfully
        """
        if not self.enabled:
            return

        self._metrics["task_completions"] += 1

        self.logger.debug(f"Task completion tracked: success={success}")

    def track_tool_usage(self, tool_name: str, success: bool = True):
        """
        Track tool usage event.

        Args:
            tool_name: Name of the tool being used
            success: Whether the tool call was successful
        """
        if not self.enabled:
            return

        self._metrics["tool_calls"] += 1

        # Only track tool name, not arguments or results
        self.logger.debug(f"Tool usage tracked: {tool_name}, success={success}")

    def track_error(self, error_type: str = None):
        """
        Track an error event.

        Args:
            error_type: Type of error (not the full message)
        """
        if not self.enabled:
            return

        self._metrics["errors"] += 1

        # Only track error type, not full error messages
        self.logger.debug(f"Error tracked: type={error_type or 'unknown'}")

    def track_feature_usage(self, feature_name: str):
        """
        Track usage of a specific feature.

        Args:
            feature_name: Name of the feature being used
        """
        if not self.enabled:
            return

        # Track which features are being used (name only, no counter kept yet)
        self.logger.debug(f"Feature usage tracked: {feature_name}")

    def get_metrics(self) -> Dict[str, Any]:
        """
        Get current metrics summary.

        Returns:
            Dictionary of current metrics, or {"enabled": False} when disabled
        """
        if not self.enabled:
            return {"enabled": False}

        return {
            "enabled": True,
            "session_id": self.session_id,
            "metrics": self._metrics.copy(),
            "environment": self._environment.copy(),
        }

    def flush(self):
        """
        Flush any pending telemetry data.

        Sends one anonymous event to PostHog (when the client is available)
        and resets the in-memory counters.
        """
        if not self.enabled:
            return

        metrics = self.get_metrics()
        self.logger.debug(f"Telemetry flush: {metrics}")

        # Send to PostHog if available
        if self._posthog:
            try:
                self._posthog.capture(
                    distinct_id='anonymous',
                    event='sdk_used',
                    properties={
                        'version': self._environment['framework_version'],
                        'os': platform.system(),
                        '$process_person_profile': False,
                        '$geoip_disable': True
                    }
                )
            except Exception as e:
                # Best-effort delivery; telemetry must never raise.
                self.logger.debug(f"PostHog capture failed: {e}")

        # Reset counters
        for key in self._metrics:
            if isinstance(self._metrics[key], int):
                self._metrics[key] = 0
+ + Returns: + The global MinimalTelemetry instance + """ + global _telemetry_instance + if _telemetry_instance is None: + _telemetry_instance = MinimalTelemetry() + return _telemetry_instance + + +def disable_telemetry(): + """Programmatically disable telemetry.""" + global _telemetry_instance + if _telemetry_instance: + _telemetry_instance.enabled = False + else: + _telemetry_instance = MinimalTelemetry(enabled=False) + + +def enable_telemetry(): + """Programmatically enable telemetry (if not disabled by environment).""" + global _telemetry_instance + if not _TELEMETRY_DISABLED: + if _telemetry_instance: + _telemetry_instance.enabled = True + else: + _telemetry_instance = MinimalTelemetry(enabled=True) + + +# For backward compatibility with existing code +class TelemetryCollector: + """Backward compatibility wrapper for the old TelemetryCollector interface.""" + + def __init__(self, backend: str = "minimal", service_name: str = "praisonai-agents", **kwargs): + self.telemetry = get_telemetry() + + def start(self): + """Start telemetry collection.""" + # No-op for minimal implementation + pass + + def stop(self): + """Stop telemetry collection and flush data.""" + self.telemetry.flush() + + def trace_agent_execution(self, agent_name: str, **attributes): + """Compatibility method for agent execution tracking.""" + from contextlib import contextmanager + + @contextmanager + def _trace(): + try: + yield None + self.telemetry.track_agent_execution(agent_name, success=True) + except Exception: + self.telemetry.track_agent_execution(agent_name, success=False) + raise + + return _trace() + + def trace_task_execution(self, task_name: str, agent_name: str = None, **attributes): + """Compatibility method for task execution tracking.""" + from contextlib import contextmanager + + @contextmanager + def _trace(): + try: + yield None + self.telemetry.track_task_completion(task_name, success=True) + except Exception: + self.telemetry.track_task_completion(task_name, 
success=False) + raise + + return _trace() + + def trace_tool_call(self, tool_name: str, **attributes): + """Compatibility method for tool call tracking.""" + from contextlib import contextmanager + + @contextmanager + def _trace(): + try: + yield None + self.telemetry.track_tool_usage(tool_name, success=True) + except Exception: + self.telemetry.track_tool_usage(tool_name, success=False) + raise + + return _trace() + + def trace_llm_call(self, model: str = None, **attributes): + """Compatibility method for LLM call tracking.""" + from contextlib import contextmanager + + @contextmanager + def _trace(): + # We don't track LLM calls in minimal telemetry + yield None + + return _trace() + + def record_tokens(self, prompt_tokens: int, completion_tokens: int, model: str = None): + """Compatibility method - we don't track token usage.""" + pass + + def record_cost(self, cost: float, model: str = None): + """Compatibility method - we don't track costs.""" + pass + + def get_metrics(self) -> Dict[str, Any]: + """Get current metrics.""" + return self.telemetry.get_metrics() \ No newline at end of file diff --git a/src/praisonai-agents/pyproject.toml b/src/praisonai-agents/pyproject.toml index 128806aad..b709731d9 100644 --- a/src/praisonai-agents/pyproject.toml +++ b/src/praisonai-agents/pyproject.toml @@ -14,7 +14,8 @@ dependencies = [ "pydantic", "rich", "openai", - "mcp>=1.6.0" + "mcp>=1.6.0", + "posthog>=4.0.0" ] [project.optional-dependencies] @@ -54,6 +55,11 @@ api = [ "uvicorn>=0.34.0" ] +# Telemetry dependencies +telemetry = [ + "posthog>=4.0.0" +] + # Combined features all = [ "praisonaiagents[memory]", @@ -61,7 +67,8 @@ all = [ "praisonaiagents[graph]", "praisonaiagents[llm]", "praisonaiagents[mcp]", - "praisonaiagents[api]" + "praisonaiagents[api]", + "praisonaiagents[telemetry]" ] [tool.setuptools.packages.find] diff --git a/src/praisonai-agents/telemetry_analysis.md b/src/praisonai-agents/telemetry_analysis.md new file mode 100644 index 000000000..b021e2cde 
--- /dev/null +++ b/src/praisonai-agents/telemetry_analysis.md @@ -0,0 +1,123 @@ +# Telemetry Analysis: Why PostHog Events Aren't Being Sent by Default + +## Executive Summary + +The telemetry system is implemented but **not actively integrated** into the PraisonAI Agents codebase. While PostHog is properly configured and functional, telemetry data is never sent because: + +1. **No automatic integration**: Agent and PraisonAIAgents classes don't use telemetry +2. **No automatic flush**: Events are collected but never sent to PostHog +3. **No lifecycle hooks**: No atexit handler or periodic flush mechanism + +## Current State + +### What's Working ✓ +- PostHog client is properly initialized with API key and host +- Telemetry can be enabled/disabled via environment variables +- Privacy-first design with anonymous tracking +- Manual telemetry tracking and flushing works correctly + +### What's Not Working ✗ +- Agent class doesn't integrate telemetry +- PraisonAIAgents class doesn't integrate telemetry +- No automatic flush() calls anywhere in the codebase +- Integration module (integration.py) is not used + +## Technical Analysis + +### 1. PostHog Configuration (telemetry.py) +```python +# Lines 84-93: PostHog is initialized correctly +if POSTHOG_AVAILABLE: + try: + self._posthog = Posthog( + project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7', + host='https://eu.i.posthog.com' + ) + except: + self._posthog = None +``` + +### 2. Flush Implementation (telemetry.py) +```python +# Lines 208-224: flush() sends events but is never called automatically +def flush(self): + if hasattr(self, '_posthog') and self._posthog: + try: + self._posthog.capture( + distinct_id='anonymous', + event='sdk_used', + properties={...} + ) + self._posthog.capture('test-id', 'test-event') + except: + pass +``` + +### 3. 
Missing Integration Points + +#### Agent Class (agent/agent.py) +- No telemetry imports +- No telemetry initialization in __init__ +- No telemetry tracking in start(), run(), or execute_tool() + +#### PraisonAIAgents Class (agents/agents.py) +- No telemetry imports +- No telemetry initialization +- No telemetry tracking in start() or workflow execution + +## Root Causes + +1. **Incomplete Implementation**: The telemetry system was built but never integrated into the main classes +2. **No Automatic Lifecycle Management**: No atexit handler or periodic flush +3. **Integration Module Not Used**: `integration.py` has the code but it's never imported/called + +## Recommendations + +### Immediate Fixes + +1. **Add automatic integration to Agent class**: +```python +# In agent/agent.py __init__ +from ..telemetry import get_telemetry +self._telemetry = get_telemetry() + +# In start() method +if self._telemetry and self._telemetry.enabled: + self._telemetry.track_agent_execution(self.name, success=True) +``` + +2. **Add atexit handler in telemetry.py**: +```python +import atexit + +def _flush_on_exit(): + if _telemetry_instance: + _telemetry_instance.flush() + +atexit.register(_flush_on_exit) +``` + +3. **Add periodic flush**: +```python +# Flush after N events or T seconds +if self._metrics["total_events"] >= 100: + self.flush() +``` + +### Long-term Improvements + +1. **Make telemetry opt-in by default** with clear documentation +2. **Add telemetry configuration options** (flush interval, batch size) +3. **Implement proper error handling** for network failures +4. **Add telemetry dashboard** for users to see their usage + +## Testing + +Created test files demonstrate: +- `test_posthog.py`: PostHog is working correctly +- `test_posthog_detailed.py`: Events are sent when flush() is called +- `test_telemetry_integration.py`: Integration is missing + +## Conclusion + +The telemetry infrastructure is well-designed but incomplete. 
The main issue is that telemetry code exists in isolation and is never called by the core Agent/PraisonAIAgents classes. Additionally, even if it were integrated, the lack of automatic flush mechanisms means data would never reach PostHog. \ No newline at end of file diff --git a/src/praisonai-agents/telemetry_example.py b/src/praisonai-agents/telemetry_example.py new file mode 100644 index 000000000..0b18c1bbc --- /dev/null +++ b/src/praisonai-agents/telemetry_example.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +""" +Example demonstrating the minimal telemetry implementation. +""" + +import os +from praisonaiagents.telemetry import get_telemetry, disable_telemetry, enable_telemetry + +# Example 1: Default telemetry (enabled unless disabled by environment) +print("=== Example 1: Default Telemetry ===") +telemetry = get_telemetry() +print(f"Telemetry enabled: {telemetry.enabled}") + +# Track some events +telemetry.track_agent_execution("TestAgent", success=True) +telemetry.track_task_completion("TestTask", success=True) +telemetry.track_tool_usage("calculator", success=True) +telemetry.track_error("ValueError") +telemetry.track_feature_usage("memory") + +# Get metrics +metrics = telemetry.get_metrics() +print(f"Current metrics: {metrics}") + +# Example 2: Programmatically disable telemetry +print("\n=== Example 2: Disable Telemetry ===") +disable_telemetry() +telemetry = get_telemetry() +print(f"Telemetry enabled: {telemetry.enabled}") + +# These won't be tracked +telemetry.track_agent_execution("TestAgent2", success=True) +metrics = telemetry.get_metrics() +print(f"Metrics after disable: {metrics}") + +# Example 3: Re-enable telemetry +print("\n=== Example 3: Re-enable Telemetry ===") +enable_telemetry() +telemetry = get_telemetry() +print(f"Telemetry enabled: {telemetry.enabled}") + +# Example 4: Test with environment variable +print("\n=== Example 4: Environment Variable Opt-out ===") +# Simulate environment variable being set 
+os.environ['PRAISONAI_TELEMETRY_DISABLED'] = 'true' + +# Need to create a new instance to pick up the environment change +from importlib import reload +import praisonaiagents.telemetry.telemetry as telemetry_module +reload(telemetry_module) + +from praisonaiagents.telemetry import get_telemetry as get_new_telemetry +new_telemetry = get_new_telemetry() +print(f"Telemetry enabled with env var: {new_telemetry.enabled}") + +# Clean up +del os.environ['PRAISONAI_TELEMETRY_DISABLED'] + +# Example 5: Backward compatibility with TelemetryCollector +print("\n=== Example 5: Backward Compatibility ===") +from praisonaiagents.telemetry.telemetry import TelemetryCollector + +collector = TelemetryCollector() +collector.start() + +# Use context managers (backward compatible interface) +with collector.trace_agent_execution("CompatAgent"): + print("Executing agent...") + +with collector.trace_tool_call("web_search"): + print("Calling tool...") + +# Get metrics through collector +collector_metrics = collector.get_metrics() +print(f"Collector metrics: {collector_metrics}") + +collector.stop() + +print("\n=== Telemetry Example Complete ===") +print("\nPrivacy Notes:") +print("- No personal data, prompts, or responses are collected") +print("- Only anonymous usage metrics are tracked") +print("- Telemetry can be disabled via environment variables:") +print(" - PRAISONAI_TELEMETRY_DISABLED=true") +print(" - PRAISONAI_DISABLE_TELEMETRY=true") +print(" - DO_NOT_TRACK=true") \ No newline at end of file diff --git a/src/praisonai-agents/telemetry_minimal.py b/src/praisonai-agents/telemetry_minimal.py new file mode 100644 index 000000000..2ce49f1e4 --- /dev/null +++ b/src/praisonai-agents/telemetry_minimal.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +""" +Minimal example showing telemetry integration with agents. 
+""" + +import os +# Uncomment to disable telemetry +# os.environ['PRAISONAI_TELEMETRY_DISABLED'] = 'true' + +from praisonaiagents import Agent, Task, PraisonAIAgents +from praisonaiagents.telemetry import get_telemetry + +# Create a simple agent +agent = Agent( + name="Calculator", + role="Math Expert", + goal="Perform calculations", + instructions="You are a helpful math expert." +) + +# Create a task +task = Task( + description="Calculate 2 + 2", + expected_output="The sum", + agent=agent +) + +# Create and run workflow +workflow = PraisonAIAgents( + agents=[agent], + tasks=[task], + process="sequential" +) + +print("Running workflow with telemetry...") +result = workflow.start() +print(f"Result: {result}") + +# Check telemetry metrics +telemetry = get_telemetry() +if telemetry.enabled: + metrics = telemetry.get_metrics() + print(f"\nTelemetry metrics collected:") + print(f"- Agent executions: {metrics['metrics']['agent_executions']}") + print(f"- Task completions: {metrics['metrics']['task_completions']}") + print(f"- Errors: {metrics['metrics']['errors']}") + print(f"- Session ID: {metrics['session_id']}") +else: + print("\nTelemetry is disabled") + +print("\nTo disable telemetry, set any of these environment variables:") +print("- PRAISONAI_TELEMETRY_DISABLED=true") +print("- DO_NOT_TRACK=true") \ No newline at end of file diff --git a/src/praisonai-agents/test.py b/src/praisonai-agents/test.py index 1bc49e131..d083cdd19 100644 --- a/src/praisonai-agents/test.py +++ b/src/praisonai-agents/test.py @@ -15,7 +15,7 @@ role="Blog Writer", goal="Write a blog post about AI", backstory="Expert at writing blog posts", - llm="gpt-4o-mini", + llm=llm_config, ) blog_task = Task( @@ -27,7 +27,7 @@ agents = PraisonAIAgents( agents=[blog_agent], tasks=[blog_task], - memory=True + memory=False ) result = agents.start() diff --git a/src/praisonai-agents/test_auto_telemetry_final.py b/src/praisonai-agents/test_auto_telemetry_final.py new file mode 100644 index 
000000000..173a24435 --- /dev/null +++ b/src/praisonai-agents/test_auto_telemetry_final.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +""" +Final test of automatic telemetry. +""" + +print("Step 1: Import praisonaiagents...") +from praisonaiagents import Agent, Task, PraisonAIAgents, get_telemetry + +print("\nStep 2: Check if telemetry is initialized...") +telemetry = get_telemetry() +print(f" Telemetry enabled: {telemetry.enabled}") +print(f" PostHog available: {telemetry._posthog is not None}") + +print("\nStep 3: Check if Agent class is instrumented...") +print(f" Agent.__init__.__name__: {Agent.__init__.__name__}") + +print("\nStep 4: Create an agent...") +agent = Agent(name="Test", role="Test", goal="Test", instructions="Test") +print(f" agent.chat wrapped: {hasattr(agent.chat, '__wrapped__')}") + +print("\nStep 5: Create and run a simple workflow...") +task = Task(description="Say hello", expected_output="A greeting", agent=agent) +workflow = PraisonAIAgents(agents=[agent], tasks=[task], process="sequential") + +# Check workflow instrumentation +print(f" workflow.execute_task wrapped: {hasattr(workflow.execute_task, '__wrapped__')}") + +result = workflow.start() +print(f"\nResult: {result}") + +print("\nStep 6: Check metrics...") +metrics = telemetry.get_metrics() +print(f" Metrics: {metrics['metrics']}") + +if metrics['metrics']['agent_executions'] > 0: + print("\n✅ Automatic telemetry is working!") +else: + print("\n❌ Automatic telemetry is NOT working") + print("\nPossible reason: The auto_instrument_all() in __init__.py might be") + print("running before the telemetry module's lazy initialization completes.") \ No newline at end of file diff --git a/src/praisonai-agents/test_import_order.py b/src/praisonai-agents/test_import_order.py new file mode 100644 index 000000000..e008f8af7 --- /dev/null +++ b/src/praisonai-agents/test_import_order.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +""" +Test import order to debug telemetry availability. 
+""" + +import sys + +print("1. Importing telemetry module directly...") +try: + import praisonaiagents.telemetry + print(" ✅ Telemetry module imported successfully") +except Exception as e: + print(f" ❌ Failed: {type(e).__name__}: {e}") + +print("\n2. Importing main praisonaiagents...") +try: + import praisonaiagents + print(" ✅ Main module imported successfully") + print(f" _telemetry_available: {praisonaiagents._telemetry_available}") +except Exception as e: + print(f" ❌ Failed: {type(e).__name__}: {e}") + +print("\n3. Checking what prevented telemetry import...") +# The main __init__.py tries to import from .telemetry +# Let's see if we can import the functions directly +try: + from praisonaiagents.telemetry import ( + get_telemetry, + enable_telemetry, + disable_telemetry, + MinimalTelemetry, + TelemetryCollector + ) + print(" ✅ All telemetry functions imported successfully") +except Exception as e: + print(f" ❌ Failed to import telemetry functions: {type(e).__name__}: {e}") + import traceback + traceback.print_exc() \ No newline at end of file diff --git a/src/praisonai-agents/test_manual_instrumentation.py b/src/praisonai-agents/test_manual_instrumentation.py new file mode 100644 index 000000000..ff0311831 --- /dev/null +++ b/src/praisonai-agents/test_manual_instrumentation.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +""" +Test manual instrumentation to verify it works. +""" + +from praisonaiagents import Agent, Task, PraisonAIAgents +from praisonaiagents.telemetry import get_telemetry +from praisonaiagents.telemetry.integration import auto_instrument_all + +# Manually call auto_instrument_all AFTER importing classes +print("1. Calling auto_instrument_all()...") +auto_instrument_all() + +print("\n2. Creating agent and task...") +agent = Agent( + name="Calculator", + role="Math Expert", + goal="Perform calculations", + instructions="You are a helpful math expert." 
+) + +task = Task( + description="Calculate 3 + 3", + expected_output="The sum", + agent=agent +) + +print("\n3. Creating workflow...") +workflow = PraisonAIAgents( + agents=[agent], + tasks=[task], + process="sequential" +) + +print("\n4. Running workflow...") +result = workflow.start() +print(f"Result: {result}") + +print("\n5. Checking telemetry metrics...") +telemetry = get_telemetry() +metrics = telemetry.get_metrics() +print(f"Telemetry metrics: {metrics['metrics']}") +print(f"PostHog available: {telemetry._posthog is not None}") + +if metrics['metrics']['agent_executions'] > 0: + print("\n✅ SUCCESS! Telemetry is working correctly.") + print(" Data will be sent to PostHog on program exit.") +else: + print("\n❌ FAILED! Telemetry metrics are still 0.") + + # Additional debugging + print("\nDebugging info:") + print(f" agent.chat wrapped: {hasattr(agent.chat, '__wrapped__')}") + print(f" workflow.execute_task wrapped: {hasattr(workflow.execute_task, '__wrapped__')}") \ No newline at end of file diff --git a/src/praisonai-agents/test_posthog.py b/src/praisonai-agents/test_posthog.py new file mode 100644 index 000000000..2a55fc9ac --- /dev/null +++ b/src/praisonai-agents/test_posthog.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +""" +Test PostHog integration in telemetry. +""" + +import os +import sys + +# Ensure telemetry is enabled +if 'PRAISONAI_TELEMETRY_DISABLED' in os.environ: + del os.environ['PRAISONAI_TELEMETRY_DISABLED'] +if 'PRAISONAI_DISABLE_TELEMETRY' in os.environ: + del os.environ['PRAISONAI_DISABLE_TELEMETRY'] +if 'DO_NOT_TRACK' in os.environ: + del os.environ['DO_NOT_TRACK'] + +# Test PostHog availability +print("=== Testing PostHog Integration ===") +print("\n1. Checking PostHog import:") +try: + from posthog import Posthog + print("✓ PostHog is available") +except ImportError as e: + print("✗ PostHog is NOT available") + print(f" Error: {e}") + print("\n To fix: pip install posthog") + sys.exit(1) + +# Test telemetry initialization +print("\n2. 
Testing telemetry initialization:") +from praisonaiagents.telemetry import get_telemetry + +telemetry = get_telemetry() +print(f"✓ Telemetry enabled: {telemetry.enabled}") +print(f"✓ Session ID: {telemetry.session_id}") + +# Check PostHog client +print("\n3. Checking PostHog client:") +if hasattr(telemetry, '_posthog') and telemetry._posthog: + print("✓ PostHog client is initialized") + print(f" API Key: {telemetry._posthog.api_key[:10]}...") + print(f" Host: {telemetry._posthog.host}") +else: + print("✗ PostHog client is NOT initialized") + +# Test tracking +print("\n4. Testing event tracking:") +telemetry.track_agent_execution("TestAgent", success=True) +telemetry.track_task_completion("TestTask", success=True) +telemetry.track_tool_usage("test_tool", success=True) +print("✓ Events tracked locally") + +# Test flush (which should send to PostHog) +print("\n5. Testing flush (should send to PostHog):") +telemetry.flush() +print("✓ Flush completed") + +# Check metrics +print("\n6. Current metrics:") +metrics = telemetry.get_metrics() +for key, value in metrics.items(): + print(f" {key}: {value}") + +print("\n=== PostHog Test Complete ===") \ No newline at end of file diff --git a/src/praisonai-agents/test_posthog_detailed.py b/src/praisonai-agents/test_posthog_detailed.py new file mode 100644 index 000000000..356ac4242 --- /dev/null +++ b/src/praisonai-agents/test_posthog_detailed.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +""" +Detailed test of PostHog integration showing why events might not be sent. +""" + +import os +import sys +import time + +# Ensure telemetry is enabled +for var in ['PRAISONAI_TELEMETRY_DISABLED', 'PRAISONAI_DISABLE_TELEMETRY', 'DO_NOT_TRACK']: + if var in os.environ: + del os.environ[var] + +print("=== Detailed PostHog Test ===") + +# Test PostHog directly +print("\n1. 
Testing PostHog directly:") +try: + from posthog import Posthog + + # Create a PostHog client directly + posthog_client = Posthog( + project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7', + host='https://eu.i.posthog.com' + ) + + # Send a test event + print(" Sending test event...") + posthog_client.capture( + distinct_id='test-user-123', + event='direct_test_event', + properties={ + 'test': True, + 'timestamp': time.time() + } + ) + + # Flush to ensure event is sent + print(" Flushing...") + posthog_client.flush() + + print("✓ Direct PostHog test completed") + +except Exception as e: + print(f"✗ Direct PostHog test failed: {e}") + import traceback + traceback.print_exc() + +# Test telemetry integration +print("\n2. Testing telemetry integration:") +from praisonaiagents.telemetry.telemetry import MinimalTelemetry + +# Create a new telemetry instance with debug output +class DebugTelemetry(MinimalTelemetry): + def flush(self): + """Override flush to add debug output.""" + if not self.enabled: + print(" [DEBUG] Telemetry disabled, skipping flush") + return + + metrics = self.get_metrics() + print(f" [DEBUG] Flushing metrics: {metrics}") + + # Check PostHog client + if hasattr(self, '_posthog') and self._posthog: + print(" [DEBUG] PostHog client exists") + print(f" [DEBUG] PostHog API key: {self._posthog.api_key[:20]}...") + print(f" [DEBUG] PostHog host: {self._posthog.host}") + + try: + # Send events + print(" [DEBUG] Sending sdk_used event...") + self._posthog.capture( + distinct_id='anonymous', + event='sdk_used', + properties={ + 'version': self._environment['framework_version'], + 'os': self._environment['os_type'], + 'python_version': self._environment['python_version'], + 'session_id': self.session_id, + 'metrics': self._metrics, + '$process_person_profile': False + } + ) + + print(" [DEBUG] Sending test event...") + self._posthog.capture('test-id', 'test-event') + + # Explicitly flush PostHog + print(" [DEBUG] Flushing PostHog client...") + 
self._posthog.flush() + + print(" [DEBUG] PostHog flush completed") + except Exception as e: + print(f" [DEBUG] PostHog error: {e}") + import traceback + traceback.print_exc() + else: + print(" [DEBUG] No PostHog client available") + + # Reset counters + for key in self._metrics: + if isinstance(self._metrics[key], int): + self._metrics[key] = 0 + +# Create debug telemetry instance +telemetry = DebugTelemetry(enabled=True) + +# Track some events +print("\n3. Tracking events:") +telemetry.track_agent_execution("TestAgent", success=True) +telemetry.track_task_completion("TestTask", success=True) +telemetry.track_tool_usage("calculator", success=True) +print("✓ Events tracked") + +# Flush with debug output +print("\n4. Flushing telemetry:") +telemetry.flush() + +# Wait a moment for async operations +print("\n5. Waiting for async operations...") +time.sleep(2) + +print("\n=== Test Complete ===") + +# Additional debug info +print("\n6. Debug information:") +print(f" POSTHOG_AVAILABLE: {telemetry.logger.parent.manager.loggerDict.get('posthog', 'Not found')}") +print(f" Telemetry module location: {MinimalTelemetry.__module__}") + +# Check if PostHog is properly installed +print("\n7. PostHog installation check:") +try: + import posthog + print(f" PostHog version: {posthog.VERSION}") + print(f" PostHog location: {posthog.__file__}") +except Exception as e: + print(f" PostHog check failed: {e}") \ No newline at end of file diff --git a/src/praisonai-agents/test_posthog_direct.py b/src/praisonai-agents/test_posthog_direct.py new file mode 100644 index 000000000..b339b9c7c --- /dev/null +++ b/src/praisonai-agents/test_posthog_direct.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +""" +Test PostHog integration directly to verify it's working. 
+""" + +import os +from praisonaiagents.telemetry import get_telemetry +from praisonaiagents.telemetry.integration import auto_instrument_all + +# Enable automatic telemetry instrumentation +auto_instrument_all() + +telemetry = get_telemetry() +print(f"Telemetry enabled: {telemetry.enabled}") +print(f"PostHog available: {telemetry._posthog is not None}") + +if telemetry.enabled and telemetry._posthog: + # Manually track some events + telemetry.track_agent_execution("TestAgent", success=True) + telemetry.track_task_completion("TestTask", success=True) + telemetry.track_tool_usage("TestTool") + + # Get metrics before flush + metrics = telemetry.get_metrics() + print(f"\nMetrics before flush:") + print(f"- Agent executions: {metrics['metrics']['agent_executions']}") + print(f"- Task completions: {metrics['metrics']['task_completions']}") + print(f"- Tool calls: {metrics['metrics']['tool_calls']}") + + # Manually flush to send to PostHog + print("\nFlushing telemetry to PostHog...") + telemetry.flush() + + # Get metrics after flush (should be reset) + metrics = telemetry.get_metrics() + print(f"\nMetrics after flush (should be reset):") + print(f"- Agent executions: {metrics['metrics']['agent_executions']}") + print(f"- Task completions: {metrics['metrics']['task_completions']}") + print(f"- Tool calls: {metrics['metrics']['tool_calls']}") + + print("\n✅ If no errors above, PostHog integration is working!") +else: + print("\n❌ Telemetry or PostHog is not available") \ No newline at end of file diff --git a/src/praisonai-agents/test_posthog_error.py b/src/praisonai-agents/test_posthog_error.py new file mode 100644 index 000000000..21b7f2aba --- /dev/null +++ b/src/praisonai-agents/test_posthog_error.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +""" +Test PostHog initialization error. 
+""" + +from posthog import Posthog + +print("Testing PostHog initialization with telemetry parameters...") +try: + posthog_client = Posthog( + project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7', + host='https://eu.i.posthog.com', + disable_geoip=True, + events_to_ignore=['test-event'] + ) + print(f"✅ PostHog client created: {posthog_client}") +except Exception as e: + print(f"❌ PostHog initialization failed: {type(e).__name__}: {e}") + import traceback + traceback.print_exc() \ No newline at end of file diff --git a/src/praisonai-agents/test_posthog_import.py b/src/praisonai-agents/test_posthog_import.py new file mode 100644 index 000000000..8f9a569d7 --- /dev/null +++ b/src/praisonai-agents/test_posthog_import.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +""" +Test PostHog import and initialization. +""" + +print("1. Testing PostHog import...") +try: + from posthog import Posthog + print("✅ PostHog imported successfully") +except ImportError as e: + print(f"❌ PostHog import failed: {e}") + exit(1) + +print("\n2. Testing PostHog initialization...") +try: + posthog_client = Posthog( + project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7', + host='https://eu.i.posthog.com' + ) + print("✅ PostHog client created successfully") +except Exception as e: + print(f"❌ PostHog initialization failed: {e}") + exit(1) + +print("\n3. Testing PostHog capture...") +try: + posthog_client.capture( + distinct_id='test-user', + event='test-event', + properties={'test': True} + ) + print("✅ PostHog capture successful") +except Exception as e: + print(f"❌ PostHog capture failed: {e}") + +print("\n4. Testing telemetry module...") +from praisonaiagents.telemetry.telemetry import MinimalTelemetry, POSTHOG_AVAILABLE +print(f"POSTHOG_AVAILABLE in telemetry module: {POSTHOG_AVAILABLE}") + +print("\n5. 
Creating MinimalTelemetry instance...") +telemetry = MinimalTelemetry(enabled=True) +print(f"Telemetry enabled: {telemetry.enabled}") +print(f"Telemetry _posthog: {telemetry._posthog}") + +print("\n6. Checking get_telemetry()...") +from praisonaiagents.telemetry import get_telemetry +global_telemetry = get_telemetry() +print(f"Global telemetry enabled: {global_telemetry.enabled}") +print(f"Global telemetry _posthog: {global_telemetry._posthog}") +print(f"Global telemetry session_id: {global_telemetry.session_id}") \ No newline at end of file diff --git a/src/praisonai-agents/test_telemetry_automatic.py b/src/praisonai-agents/test_telemetry_automatic.py new file mode 100644 index 000000000..abf4aa23c --- /dev/null +++ b/src/praisonai-agents/test_telemetry_automatic.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +""" +Test that telemetry works automatically without manual instrumentation. +""" + +# NO manual telemetry setup - it should work automatically! +from praisonaiagents import Agent, Task, PraisonAIAgents + +# Create a simple agent +agent = Agent( + name="AutoTelemetryTest", + role="Math Expert", + goal="Perform calculations", + instructions="You are a helpful math expert." 
+) + +# Create a task +task = Task( + description="Calculate 5 + 5", + expected_output="The sum", + agent=agent +) + +# Create and run workflow +workflow = PraisonAIAgents( + agents=[agent], + tasks=[task], + process="sequential" +) + +print("Running workflow (telemetry should be automatic)...") +result = workflow.start() +print(f"Result: {result}") + +# Check if telemetry was collected +from praisonaiagents.telemetry import get_telemetry +telemetry = get_telemetry() + +if telemetry.enabled: + metrics = telemetry.get_metrics() + print(f"\n✅ Telemetry is working automatically!") + print(f"- Agent executions: {metrics['metrics']['agent_executions']}") + print(f"- Task completions: {metrics['metrics']['task_completions']}") + print(f"- Session ID: {metrics['session_id']}") + print("\nTelemetry data will be sent to PostHog on program exit.") +else: + print("\n❌ Telemetry is disabled") \ No newline at end of file diff --git a/src/praisonai-agents/test_telemetry_integration.py b/src/praisonai-agents/test_telemetry_integration.py new file mode 100644 index 000000000..0bc224a8d --- /dev/null +++ b/src/praisonai-agents/test_telemetry_integration.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +""" +Test to demonstrate why telemetry isn't being sent by default. +""" + +import os + +# Ensure telemetry is enabled +for var in ['PRAISONAI_TELEMETRY_DISABLED', 'PRAISONAI_DISABLE_TELEMETRY', 'DO_NOT_TRACK']: + if var in os.environ: + del os.environ[var] + +print("=== Testing Telemetry Integration ===") + +# Test 1: Default behavior (no telemetry) +print("\n1. 
Testing default Agent behavior:") +from praisonaiagents import Agent, Task + +agent = Agent( + name="TestAgent", + role="Tester", + goal="Test telemetry", + backstory="I test things", + llm="gpt-4o-mini" +) + +task = Task( + description="Say hello", + expected_output="A greeting", + agent=agent +) + +# Check if agent has telemetry +print(f" Agent has telemetry: {hasattr(agent, '_telemetry')}") +print(f" Agent methods: {[m for m in dir(agent) if 'telemetry' in m.lower()]}") + +# Test 2: Manual telemetry integration +print("\n2. Testing manual telemetry integration:") +from praisonaiagents.telemetry import get_telemetry +from praisonaiagents.telemetry.integration import instrument_agent + +telemetry = get_telemetry() +print(f" Telemetry enabled: {telemetry.enabled}") + +# Manually instrument the agent +instrument_agent(agent, telemetry) +print(" Agent instrumented manually") + +# Test 3: Auto-instrumentation +print("\n3. Testing auto-instrumentation:") +from praisonaiagents.telemetry.integration import auto_instrument_all + +# Enable auto-instrumentation +auto_instrument_all(telemetry) +print(" Auto-instrumentation enabled") + +# Create a new agent to test auto-instrumentation +new_agent = Agent( + name="AutoInstrumentedAgent", + role="Tester", + goal="Test auto telemetry", + backstory="I test auto-instrumentation", + llm="gpt-4o-mini" +) + +print(f" New agent instrumented: {hasattr(new_agent.execute, '__wrapped__')}") + +# Test 4: Check if telemetry data is collected +print("\n4. Checking telemetry metrics:") +initial_metrics = telemetry.get_metrics() +print(f" Initial metrics: {initial_metrics['metrics']}") + +# The problem: telemetry is never flushed automatically! +print("\n5. The Issue:") +print(" ✗ Telemetry is implemented but NOT automatically integrated") +print(" ✗ Even if integrated, flush() is never called automatically") +print(" ✗ PostHog events are only sent when flush() is explicitly called") + +# Test 5: Solution - manual flush +print("\n6. 
Solution - Manual flush:") +telemetry.flush() +print(" ✓ Manual flush called - events sent to PostHog") + +# Test 6: Workflow integration +print("\n7. Testing workflow integration:") +from praisonaiagents import PraisonAIAgents + +agents = [ + Agent(name="Agent1", role="First", goal="Do first task", backstory="I'm first"), + Agent(name="Agent2", role="Second", goal="Do second task", backstory="I'm second") +] + +tasks = [ + Task(description="Task 1", expected_output="Output 1", agent=agents[0]), + Task(description="Task 2", expected_output="Output 2", agent=agents[1]) +] + +workflow = PraisonAIAgents( + agents=agents, + tasks=tasks, + process="sequential" +) + +print(f" Workflow has telemetry integration: {hasattr(workflow.start, '__wrapped__')}") + +print("\n=== Summary of Issues ===") +print("\n1. No automatic integration:") +print(" - Agent and PraisonAIAgents classes don't automatically use telemetry") +print(" - Need to manually call instrument_agent() or auto_instrument_all()") + +print("\n2. No automatic flush:") +print(" - Even when integrated, telemetry data is only collected locally") +print(" - flush() must be called explicitly to send data to PostHog") +print(" - No automatic flush on program exit or periodic flush") + +print("\n3. Missing integration points:") +print(" - Agent.__init__ doesn't initialize telemetry") +print(" - Agent.execute() doesn't track execution") +print(" - PraisonAIAgents doesn't track workflow execution") +print(" - No atexit handler to flush on program termination") + +print("\n=== Recommendations ===") +print("\n1. Add automatic integration in Agent.__init__ and PraisonAIAgents.__init__") +print("2. Add atexit handler to flush telemetry on program exit") +print("3. Add periodic flush (e.g., every 100 events or 60 seconds)") +print("4. 
Document telemetry behavior clearly for users") \ No newline at end of file diff --git a/src/praisonai-agents/test_telemetry_simple.py b/src/praisonai-agents/test_telemetry_simple.py new file mode 100644 index 000000000..136795876 --- /dev/null +++ b/src/praisonai-agents/test_telemetry_simple.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +""" +Simple test to check telemetry. +""" + +from praisonaiagents import Agent, Task, PraisonAIAgents +from praisonaiagents.telemetry import get_telemetry + +# Create workflow +agent = Agent(name="Test", role="Tester", goal="Test", instructions="Test") +task = Task(description="Test", expected_output="Test", agent=agent) +workflow = PraisonAIAgents(agents=[agent], tasks=[task], process="sequential") + +# Run workflow +print("Running workflow...") +result = workflow.start() + +# Check telemetry +telemetry = get_telemetry() +metrics = telemetry.get_metrics() +print(f"\nTelemetry metrics: {metrics['metrics']}") +print(f"Session ID: {metrics['session_id']}") + +# Check if telemetry will be sent +print(f"\nTelemetry enabled: {telemetry.enabled}") +print(f"PostHog client available: {telemetry._posthog is not None}") + +if telemetry._posthog: + print("\n✅ Telemetry will be sent to PostHog on program exit") \ No newline at end of file diff --git a/src/praisonai-agents/tests/state_based_workflow_example.py b/src/praisonai-agents/tests/state_based_workflow_example.py new file mode 100644 index 000000000..20b2384bb --- /dev/null +++ b/src/praisonai-agents/tests/state_based_workflow_example.py @@ -0,0 +1,294 @@ +""" +State-Based Workflow Control Example +=================================== + +This example shows how to use state for: +1. Conditional task execution based on state +2. Loop control using state +3. Dynamic workflow modification +4. 
# ---------------------------------------------------------------------------
# Tool functions driving the state-based workflow.
#
# NOTE(review): these tools read/write the module-level `workflow` instance
# created further down in this file, so they only work once that object
# exists. `random` is used to simulate analysis/cleaning results.
# ---------------------------------------------------------------------------

def analyze_data_quality() -> Dict[str, Any]:
    """Simulate a data-quality check and record the outcome in workflow state."""
    score = random.uniform(0.5, 1.0)
    errors_found = random.choice([True, False])

    workflow.set_state("quality_score", score)
    workflow.set_state("has_errors", errors_found)
    workflow.set_state("data_status", "analyzed")

    # Bucket the numeric score into a coarse quality label.
    if score < 0.7:
        level = "poor"
    elif score < 0.85:
        level = "moderate"
    else:
        level = "good"
    workflow.set_state("quality_level", level)

    needs_cleanup = errors_found or score < 0.8
    return {
        "quality_score": score,
        "has_errors": errors_found,
        "quality_level": workflow.get_state("quality_level"),
        "recommendation": "clean_data" if needs_cleanup else "proceed",
    }

def clean_data_based_on_state() -> Dict[str, Any]:
    """Run one simulated cleaning pass and report whether more passes are needed."""
    old_score = workflow.get_state("quality_score", 0)
    # Read for parity with the original flow; cleaning always clears it below.
    has_errors = workflow.get_state("has_errors", False)
    attempts_so_far = workflow.get_state("cleaning_attempts", 0)

    workflow.set_state("cleaning_attempts", attempts_so_far + 1)

    # Each pass improves quality by a random amount, capped at 0.95.
    gain = random.uniform(0.1, 0.2)
    new_score = min(old_score + gain, 0.95)

    workflow.set_state("quality_score", new_score)
    workflow.set_state("has_errors", False)
    workflow.set_state("last_cleaning_improvement", gain)

    # Keep cleaning while quality is low, but never more than 3 passes total.
    # NOTE(review): `quality_level` is not recomputed here, so it can go
    # stale relative to the improved `quality_score` - confirm intended.
    if new_score < 0.8 and attempts_so_far < 3:
        status = "needs_more_cleaning"
    else:
        status = "cleaning_complete"
        workflow.set_state("data_status", "cleaned")

    return {
        "previous_score": old_score,
        "new_score": new_score,
        "improvement": gain,
        "attempts": attempts_so_far + 1,
        "status": status,
    }

def process_batch_with_state() -> Dict[str, Any]:
    """Process one batch per call, tracking progress in workflow state."""
    # Lazily initialise the batch bookkeeping on the first call.
    if not workflow.has_state("batch_total"):
        workflow.set_state("batch_total", random.randint(5, 10))
        workflow.set_state("batch_current", 0)
        workflow.set_state("batch_results", [])

    total = workflow.get_state("batch_total")
    done = workflow.get_state("batch_current") + 1
    results = workflow.get_state("batch_results", [])

    # Simulate processing the current batch.
    outcome = {
        "batch_number": done,
        "records_processed": random.randint(100, 500),
        "errors": random.randint(0, 5),
    }
    results.append(outcome)

    workflow.set_state("batch_current", done)
    workflow.set_state("batch_results", results)

    if done < total:
        status = "more_batches"
        workflow.set_state("batch_status", "in_progress")
    else:
        # Final batch: roll up totals for the report stage.
        status = "all_batches_complete"
        workflow.set_state("batch_status", "completed")
        workflow.set_state("total_records", sum(r["records_processed"] for r in results))
        workflow.set_state("total_errors", sum(r["errors"] for r in results))

    return {
        "batch": outcome,
        "progress": f"{done}/{total}",
        "status": status,
        "remaining": total - done,
    }

def generate_report_from_state() -> Dict[str, Any]:
    """Assemble the final report from everything accumulated in workflow state."""
    all_state = workflow.get_all_state()

    report = {
        "data_quality": {
            # NOTE(review): this is the score *after* any cleaning passes,
            # despite the "initial_score" key name - confirm intended.
            "initial_score": workflow.get_state("quality_score", 0),
            "quality_level": workflow.get_state("quality_level", "unknown"),
            "cleaning_attempts": workflow.get_state("cleaning_attempts", 0),
            "final_status": workflow.get_state("data_status", "unknown"),
        },
        "batch_processing": {
            "total_batches": workflow.get_state("batch_total", 0),
            "completed_batches": workflow.get_state("batch_current", 0),
            "total_records": workflow.get_state("total_records", 0),
            "total_errors": workflow.get_state("total_errors", 0),
            "status": workflow.get_state("batch_status", "not_started"),
        },
        "workflow_metadata": {
            "has_errors": workflow.get_state("has_errors", False),
            "state_keys": list(all_state.keys()),
            "state_size": len(all_state),
        },
    }

    # Average records per batch, when any batches actually ran.
    if workflow.has_state("batch_results"):
        per_batch = workflow.get_state("batch_results")
        report["batch_processing"]["average_records_per_batch"] = (
            sum(r["records_processed"] for r in per_batch) / len(per_batch)
            if per_batch else 0
        )

    workflow.set_state("final_report_generated", True)
    workflow.set_state("workflow_complete", True)

    return report

def check_state_conditions() -> str:
    """Map the current quality/cleaning state to a routing decision label."""
    level = workflow.get_state("quality_level", "unknown")
    attempts = workflow.get_state("cleaning_attempts", 0)
    errors_present = workflow.get_state("has_errors", False)

    # Guard-clause chain mirrors the decision table used by the tasks below.
    if level == "poor" and attempts == 0:
        return "needs_cleaning"
    if level == "moderate" and not errors_present:
        return "can_proceed"
    if level == "good":
        return "ready_for_processing"
    if attempts >= 3:
        return "max_cleaning_reached"
    return "needs_analysis"
transformation and batch processing", + tools=[clean_data_based_on_state, process_batch_with_state], + llm="gpt-4o-mini" +) + +report_generator = Agent( + name="ReportGenerator", + role="Report generation", + goal="Generate comprehensive reports from workflow state", + backstory="Expert in creating detailed analytical reports", + tools=[generate_report_from_state], + llm="gpt-4o-mini" +) + +# Create tasks with state-based conditions +analyze_task = Task( + name="analyze_quality", + description="Analyze the data quality and set initial state values", + expected_output="Data quality analysis with recommendations", + agent=data_analyst, + tools=[analyze_data_quality] +) + +decision_task = Task( + name="quality_decision", + description="Check data quality state and decide next action using check_state_conditions tool", + expected_output="Decision on whether to clean data or proceed", + agent=data_analyst, + tools=[check_state_conditions], + task_type="decision", + condition={ + "needs_cleaning": ["clean_data"], + "can_proceed": ["process_batches"], + "ready_for_processing": ["process_batches"], + "max_cleaning_reached": ["process_batches"], + "needs_analysis": ["analyze_quality"] + }, + context=[analyze_task] +) + +clean_task = Task( + name="clean_data", + description="Clean the data to improve quality score. Check if more cleaning is needed.", + expected_output="Cleaning results with status", + agent=data_engineer, + tools=[clean_data_based_on_state], + task_type="decision", + condition={ + "needs_more_cleaning": ["clean_data"], + "cleaning_complete": ["process_batches"] + } +) + +process_task = Task( + name="process_batches", + description="Process data in batches. 
Continue until all batches are complete.", + expected_output="Batch processing results", + agent=data_engineer, + tools=[process_batch_with_state], + task_type="loop", + condition={ + "more_batches": ["process_batches"], + "all_batches_complete": ["generate_report"] + } +) + +report_task = Task( + name="generate_report", + description="Generate final report from all accumulated state data", + expected_output="Comprehensive workflow report", + agent=report_generator, + tools=[generate_report_from_state] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[data_analyst, data_engineer, report_generator], + tasks=[analyze_task, decision_task, clean_task, process_task, report_task], + verbose=1, + process="workflow" +) + +# Demonstrate state before workflow +print("\n=== State-Based Workflow Control Demo ===") +print("\n1. Initial state (empty):", workflow.get_all_state()) + +# Run the workflow +print("\n2. Running workflow with state-based decisions...") +result = workflow.start() + +# Show final state +print("\n3. Final workflow state:") +final_state = workflow.get_all_state() +print(f" Total state entries: {len(final_state)}") +print(f" Quality level: {final_state.get('quality_level')}") +print(f" Cleaning attempts: {final_state.get('cleaning_attempts', 0)}") +print(f" Batches processed: {final_state.get('batch_current', 0)}/{final_state.get('batch_total', 0)}") +print(f" Total records: {final_state.get('total_records', 0)}") +print(f" Workflow complete: {final_state.get('workflow_complete', False)}") + +# Display task execution path +print("\n4. 
# ---------------------------------------------------------------------------
# State-aware tool functions.
#
# NOTE(review): these tools rely on the module-level `workflow` and `session`
# objects defined elsewhere in this file; they must not be called before
# those objects exist.
# ---------------------------------------------------------------------------

def initialize_project_state() -> Dict[str, Any]:
    """Seed the workflow state with project defaults and bump session counters."""
    defaults = {
        "project_name": "AI Assistant Project",
        "stage": "planning",
        "features": [],
        "completed_features": [],
        "budget": 100000,
        "spent": 0,
    }
    for key, value in defaults.items():
        workflow.set_state(key, value)

    # Session state persists across workflow runs.
    session.set_state("last_project", "AI Assistant Project")
    session.set_state("total_projects", session.get_state("total_projects", 0) + 1)

    return {
        "status": "initialized",
        "project_name": workflow.get_state("project_name"),
        "budget": workflow.get_state("budget"),
        "session_projects": session.get_state("total_projects"),
    }

def add_feature_to_project(feature_name: str, estimated_cost: int) -> Dict[str, Any]:
    """Plan a new feature, rejecting it when it would exceed the budget."""
    planned = workflow.get_state("features", [])
    budget = workflow.get_state("budget", 0)
    spent = workflow.get_state("spent", 0)

    # Guard: never commit beyond the remaining budget.
    if spent + estimated_cost > budget:
        return {
            "status": "rejected",
            "reason": "over_budget",
            "available_budget": budget - spent,
        }

    entry = {"name": feature_name, "cost": estimated_cost, "status": "planned"}
    planned.append(entry)
    workflow.set_state("features", planned)
    workflow.set_state("spent", spent + estimated_cost)

    return {
        "status": "added",
        "feature": entry,
        "total_features": len(planned),
        "remaining_budget": budget - (spent + estimated_cost),
    }

def implement_next_feature() -> Dict[str, Any]:
    """Mark the next planned feature as completed, or finish the project."""
    features = workflow.get_state("features", [])
    completed = workflow.get_state("completed_features", [])

    # First feature still in the "planned" state, if any.
    upcoming = next((f for f in features if f["status"] == "planned"), None)

    if upcoming is None:
        # Nothing left to build - the project is done.
        workflow.set_state("stage", "completed")
        return {
            "status": "no_more_features",
            "completed_count": len(completed),
            "stage": "completed",
        }

    upcoming["status"] = "completed"
    completed.append(upcoming["name"])
    workflow.set_state("features", features)
    workflow.set_state("completed_features", completed)
    workflow.set_state("stage", "implementing")

    # Mirror the latest implementation into the persistent session.
    session.set_state("last_implemented_feature", upcoming["name"])

    return {
        "status": "implemented",
        "feature": upcoming,
        "completed_count": len(completed),
        "remaining_count": sum(1 for f in features if f["status"] == "planned"),
    }

def check_project_status() -> Dict[str, Any]:
    """Snapshot the current project state for reporting."""
    planned = workflow.get_state("features", [])
    return {
        "project_name": workflow.get_state("project_name"),
        "stage": workflow.get_state("stage"),
        "total_features": len(planned),
        "completed_features": len(workflow.get_state("completed_features", [])),
        "budget": workflow.get_state("budget"),
        "spent": workflow.get_state("spent"),
        "remaining": workflow.get_state("budget", 0) - workflow.get_state("spent", 0),
        "has_more_features": workflow.has_state("features") and any(
            f["status"] == "planned" for f in planned
        ),
        "all_state": workflow.get_all_state(),
    }

def retrieve_session_history() -> Dict[str, Any]:
    """Pull persisted history out of the session store."""
    snapshot = session.restore_state()
    return {
        "total_projects": session.get_state("total_projects", 0),
        "last_project": session.get_state("last_project"),
        "last_implemented_feature": session.get_state("last_implemented_feature"),
        "session_id": session.session_id,
        "full_session_state": snapshot,
    }
name", + agent=project_manager, + tools=[initialize_project_state] +) + +plan_features_task = Task( + name="plan_features", + description="""Add the following features to the project: + 1. User Authentication (cost: 15000) + 2. Data Analytics Dashboard (cost: 25000) + 3. API Integration (cost: 20000) + 4. Mobile App Support (cost: 30000) + 5. Advanced Reporting (cost: 20000) + + Use the add_feature_to_project tool for each feature.""", + expected_output="List of features added with budget status", + agent=project_manager, + tools=[add_feature_to_project], + context=[init_task] +) + +implement_features_task = Task( + name="implement_features", + description="""Implement all planned features one by one. + Use implement_next_feature tool repeatedly until all features are completed. + Check project status after each implementation.""", + expected_output="Implementation progress and final status", + agent=developer, + tools=[implement_next_feature, check_project_status], + context=[plan_features_task] +) + +analyze_project_task = Task( + name="analyze_project", + description="""Provide a comprehensive analysis of the project including: + 1. Current project state and completion status + 2. Budget utilization + 3. Historical session data + 4. Summary of all state information""", + expected_output="Detailed project analysis report", + agent=analyst, + tools=[check_project_status, retrieve_session_history], + context=[implement_features_task] +) + +# Create workflow with state management +workflow = PraisonAIAgents( + agents=[project_manager, developer, analyst], + tasks=[init_task, plan_features_task, implement_features_task, analyze_project_task], + verbose=1, + process="sequential" +) + +# Demonstrate state operations before starting +print("\n=== State Management Demo ===") +print("\n1. Initial State Check:") +print(f" Has 'project_name' state: {workflow.has_state('project_name')}") +print(f" All state: {workflow.get_all_state()}") + +# Run the workflow +print("\n2. 
Running Workflow...") +result = workflow.start() + +# Demonstrate state after workflow +print("\n3. Final State Check:") +print(f" Project Name: {workflow.get_state('project_name')}") +print(f" Stage: {workflow.get_state('stage')}") +print(f" Features: {len(workflow.get_state('features', []))}") +print(f" Completed: {workflow.get_state('completed_features', [])}") +print(f" Budget Spent: ${workflow.get_state('spent')}/{workflow.get_state('budget')}") + +# Test state persistence +print("\n4. Session State Persistence:") +print(f" Total Projects (this session): {session.get_state('total_projects')}") +print(f" Last Project: {session.get_state('last_project')}") + +# Save session state +session.save_state({"workflow_completed": True, "timestamp": time.time()}) + +# Demonstrate state update and deletion +print("\n5. State Manipulation:") +workflow.update_state({"additional_notes": "Project completed successfully"}) +print(f" After update: {workflow.get_state('additional_notes')}") + +workflow.delete_state("additional_notes") +print(f" After delete, has 'additional_notes': {workflow.has_state('additional_notes')}") + +# Clear all workflow state (optional) +# workflow.clear_state() +# print(f" After clear: {workflow.get_all_state()}") + +print("\n=== State Management Demo Complete ===") \ No newline at end of file diff --git a/src/praisonai-agents/tests/state_with_memory_example.py b/src/praisonai-agents/tests/state_with_memory_example.py new file mode 100644 index 000000000..e9b34ade3 --- /dev/null +++ b/src/praisonai-agents/tests/state_with_memory_example.py @@ -0,0 +1,397 @@ +""" +State with Memory Integration Example +==================================== + +This example demonstrates: +1. Using shared memory for state persistence +2. State sharing between agents via memory +3. Combining workflow state with memory storage +4. 
# ---------------------------------------------------------------------------
# Tool functions combining workflow state with shared memory.
#
# NOTE(review): these tools rely on the module-level `workflow` and
# `shared_memory` objects defined elsewhere in this file.
# ---------------------------------------------------------------------------

def initialize_conversation_state(user_name: str, topic: str) -> Dict[str, Any]:
    """Reset workflow state for a new conversation and log the start to memory."""
    workflow.set_state("user_name", user_name)
    workflow.set_state("topic", topic)
    workflow.set_state("start_time", time.time())
    workflow.set_state("turn_count", 0)
    workflow.set_state("conversation_history", [])

    # Persist the conversation start so later sessions can find it.
    shared_memory.add(
        text=f"Starting conversation with {user_name} about {topic}",
        metadata={
            "type": "conversation_start",
            "user": user_name,
            "topic": topic,
            "timestamp": datetime.now().isoformat(),
        },
    )

    return {
        "status": "initialized",
        "user": user_name,
        "topic": topic,
        "memory_stored": True,
    }

def add_conversation_turn(speaker: str, message: str) -> Dict[str, Any]:
    """Append one turn to the in-state transcript and mirror it into memory."""
    transcript = workflow.get_state("conversation_history", [])
    turns_so_far = workflow.get_state("turn_count", 0)

    entry = {
        "turn": turns_so_far + 1,
        "speaker": speaker,
        "message": message,
        "timestamp": time.time(),
    }
    transcript.append(entry)

    workflow.set_state("conversation_history", transcript)
    workflow.set_state("turn_count", turns_so_far + 1)
    # Quick-access copy of each speaker's latest message.
    workflow.set_state(f"last_{speaker}_message", message)

    shared_memory.add(
        text=f"{speaker}: {message}",
        metadata={
            "type": "conversation_turn",
            "speaker": speaker,
            "turn": turns_so_far + 1,
            "topic": workflow.get_state("topic"),
            "user": workflow.get_state("user_name"),
        },
    )

    return {
        "turn_added": entry,
        "total_turns": turns_so_far + 1,
        "history_length": len(transcript),
    }

def search_conversation_memory(query: str, speaker: str = None) -> Dict[str, Any]:
    """Search stored conversation turns, optionally restricted to one speaker."""
    filters = {"type": "conversation_turn"}
    if speaker:
        filters["speaker"] = speaker

    hits = shared_memory.search(query=query, n=5, metadata_filter=filters)

    # Remember what was searched for, for later analysis steps.
    workflow.set_state("last_search_query", query)
    workflow.set_state("last_search_results", len(hits))

    return {
        "query": query,
        "results_count": len(hits),
        "results": [
            {
                "text": hit["text"],
                "speaker": hit["metadata"].get("speaker"),
                "turn": hit["metadata"].get("turn"),
                "relevance": hit.get("score", 0),
            }
            for hit in hits
        ],
    }

def analyze_conversation_patterns() -> Dict[str, Any]:
    """Derive simple stats from the transcript plus related stored memories."""
    transcript = workflow.get_state("conversation_history", [])
    user_name = workflow.get_state("user_name", "Unknown")
    topic = workflow.get_state("topic", "Unknown")

    # Per-speaker turn counts.
    speaker_counts = {}
    for entry in transcript:
        who = entry["speaker"]
        speaker_counts[who] = speaker_counts.get(who, 0) + 1

    # Pull related turns from memory for cross-conversation insight.
    related = shared_memory.search(
        query=topic,
        n=10,
        metadata_filter={"type": "conversation_turn"},
    )

    patterns = {
        "current_conversation": {
            "user": user_name,
            "topic": topic,
            "total_turns": len(transcript),
            "speaker_distribution": speaker_counts,
            "duration_seconds": time.time() - workflow.get_state("start_time", time.time()),
        },
        "memory_insights": {
            "related_conversations": len(related),
            "common_speakers": list(set(r["metadata"].get("speaker") for r in related)),
            "topics_discussed": list(set(r["metadata"].get("topic") for r in related)),
        },
    }

    workflow.set_state("conversation_analysis", patterns)
    return patterns

def retrieve_user_history(user_name: str) -> Dict[str, Any]:
    """Collect every stored memory for a user, grouped by topic."""
    hits = shared_memory.search(query=user_name, n=20, metadata_filter={"user": user_name})

    by_topic = {}
    for item in hits:
        topic = item["metadata"].get("topic", "Unknown")
        by_topic.setdefault(topic, []).append({
            "text": item["text"],
            "type": item["metadata"].get("type"),
            "timestamp": item["metadata"].get("timestamp"),
        })

    workflow.set_state("user_history_retrieved", True)
    workflow.set_state("user_topics", list(by_topic.keys()))

    return {
        "user": user_name,
        "total_interactions": len(hits),
        "topics_discussed": list(by_topic.keys()),
        "conversation_count": len(by_topic),
        "history_by_topic": by_topic,
    }

def summarize_and_save_state() -> Dict[str, Any]:
    """Build a conversation summary, persist it to memory, and note it in state."""
    transcript = workflow.get_state("conversation_history", [])
    analysis = workflow.get_state("conversation_analysis", {})
    user_name = workflow.get_state("user_name")
    topic = workflow.get_state("topic")

    summary = {
        "conversation_id": f"{user_name}_{topic}_{int(time.time())}",
        "user": user_name,
        "topic": topic,
        "total_turns": len(transcript),
        # NOTE(review): start_time defaults to 0 here, which would yield an
        # epoch-sized duration if initialization never ran - confirm intended.
        "duration": time.time() - workflow.get_state("start_time", 0),
        "participants": list(set(turn["speaker"] for turn in transcript)),
        # Preview of the last three messages only.
        "key_points": [turn["message"][:50] + "..." for turn in transcript[-3:]],
        "analysis": analysis,
    }

    shared_memory.add(
        text=json.dumps(summary, indent=2),
        metadata={
            "type": "conversation_summary",
            "user": user_name,
            "topic": topic,
            "timestamp": datetime.now().isoformat(),
            "conversation_id": summary["conversation_id"],
        },
    )

    workflow.set_state("final_summary", summary)
    workflow.set_state("state_saved_to_memory", True)

    return summary
+ 5. Alice: "How can workers prepare for these changes?" + + Use add_conversation_turn for each message.""", + expected_output="Conversation simulation results", + agent=conversation_manager, + tools=[add_conversation_turn], + context=[init_conversation_task] +) + +search_memory_task = Task( + name="search_memory", + description="""Search conversation memory for: + 1. Messages about 'automation' + 2. All messages from 'Alice' + 3. Messages about 'employment'""", + expected_output="Search results from memory", + agent=memory_analyst, + tools=[search_conversation_memory], + context=[simulate_conversation_task] +) + +analyze_patterns_task = Task( + name="analyze_patterns", + description="Analyze conversation patterns and extract insights", + expected_output="Pattern analysis results", + agent=memory_analyst, + tools=[analyze_conversation_patterns], + context=[search_memory_task] +) + +retrieve_history_task = Task( + name="retrieve_history", + description="Retrieve all historical data for user 'Alice'", + expected_output="User history summary", + agent=memory_analyst, + tools=[retrieve_user_history], + context=[analyze_patterns_task] +) + +save_state_task = Task( + name="save_state", + description="Create final summary and save conversation state to memory", + expected_output="Final conversation summary", + agent=conversation_manager, + tools=[summarize_and_save_state], + context=[retrieve_history_task] +) + +# Create workflow with shared memory +workflow = PraisonAIAgents( + agents=[conversation_manager, memory_analyst], + tasks=[ + init_conversation_task, + simulate_conversation_task, + search_memory_task, + analyze_patterns_task, + retrieve_history_task, + save_state_task + ], + memory=memory_config, # This creates shared memory for all agents + verbose=1, + process="sequential" +) + +# Run workflow +print("\n=== State with Memory Integration Demo ===") +print("\n1. 
Starting workflow with memory-based state management...") +result = workflow.start() + +# Display results +print("\n2. Final State Summary:") +final_summary = workflow.get_state("final_summary", {}) +if final_summary: + print(f" Conversation ID: {final_summary.get('conversation_id')}") + print(f" Total Turns: {final_summary.get('total_turns')}") + print(f" Duration: {final_summary.get('duration', 0):.2f} seconds") + print(f" Participants: {final_summary.get('participants')}") + +print("\n3. Memory Storage Status:") +print(f" State saved to memory: {workflow.get_state('state_saved_to_memory', False)}") +print(f" User history retrieved: {workflow.get_state('user_history_retrieved', False)}") +print(f" Topics in memory: {workflow.get_state('user_topics', [])}") + +# Test memory persistence by creating new workflow instance +print("\n4. Testing Memory Persistence:") +print(" Creating new workflow instance to test memory retrieval...") + +# Create a simple test agent to verify memory persistence +test_agent = Agent( + name="MemoryTester", + role="Test memory persistence", + goal="Verify stored conversation data", + backstory="Memory system tester", + tools=[search_conversation_memory], + memory=shared_memory, + llm="gpt-4o-mini" +) + +test_task = Task( + name="test_memory", + description="Search memory for 'Alice' to verify persistence", + expected_output="Memory search results", + agent=test_agent, + tools=[search_conversation_memory] +) + +test_workflow = PraisonAIAgents( + agents=[test_agent], + tasks=[test_task], + memory=memory_config, + verbose=0 +) + +# This will use the same memory collection and find previously stored data +test_result = test_workflow.start() + +print("\n=== Demo Complete ===") +print("\nNote: The conversation data has been persisted in memory and can be") +print("retrieved in future sessions using the same memory configuration.") \ No newline at end of file diff --git a/src/praisonai-agents/tests/telemetry_example.py 
b/src/praisonai-agents/tests/telemetry_example.py new file mode 100644 index 000000000..0e07629f3 --- /dev/null +++ b/src/praisonai-agents/tests/telemetry_example.py @@ -0,0 +1,258 @@ +""" +Example demonstrating telemetry usage in PraisonAI Agents. + +This example shows how to: +1. Enable telemetry with OpenTelemetry backend +2. Use automatic instrumentation +3. Use manual instrumentation +4. Export telemetry data +""" + +import os +from praisonaiagents import ( + Agent, Task, PraisonAIAgents, + enable_telemetry, disable_telemetry, get_telemetry_collector +) +from praisonaiagents.tools import DuckDuckGoSearchTool + +# Set current path to package root directory +import os +import sys +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + + +def basic_telemetry_example(): + """Basic example with automatic telemetry.""" + print("\n=== Basic Telemetry Example ===\n") + + # Enable telemetry - it will automatically instrument all agents and tasks + telemetry = enable_telemetry( + backend="opentelemetry", + service_name="praisonai-demo", + exporter="console" # Use console exporter for demo + ) + + if not telemetry: + print("Telemetry dependencies not installed. 
Install with:") + print("pip install praisonaiagents[telemetry]") + return + + # Create agents - telemetry will be automatically added + researcher = Agent( + name="Researcher", + role="Information gatherer", + goal="Find accurate information about topics", + backstory="You are an expert researcher with attention to detail", + tools=[DuckDuckGoSearchTool()], + llm="gpt-4o-mini" + ) + + writer = Agent( + name="Writer", + role="Content creator", + goal="Create engaging content based on research", + backstory="You are a skilled writer who creates clear, engaging content", + llm="gpt-4o-mini" + ) + + # Create tasks + research_task = Task( + name="research_task", + description="Research the latest developments in quantum computing", + expected_output="A summary of recent quantum computing breakthroughs", + agent=researcher + ) + + writing_task = Task( + name="writing_task", + description="Write a blog post about quantum computing developments", + expected_output="A 300-word blog post suitable for a general audience", + agent=writer, + context=[research_task] # Depends on research task + ) + + # Create workflow + workflow = PraisonAIAgents( + agents=[researcher, writer], + tasks=[research_task, writing_task], + process="sequential", + verbose=True + ) + + # Run workflow - all telemetry will be collected automatically + print("Running workflow with telemetry enabled...\n") + result = workflow.start() + + # Get telemetry metrics + metrics = telemetry.get_metrics() + print("\n=== Telemetry Metrics ===") + print(f"Agent executions: {metrics['agent_executions']}") + print(f"Task completions: {metrics['task_completions']}") + print(f"Tool calls: {metrics['tool_calls']}") + print(f"LLM calls: {metrics['llm_calls']}") + print(f"Total tokens: {metrics['total_tokens']}") + print(f"Errors: {metrics['errors']}") + + # Disable telemetry + disable_telemetry() + + return result + + +def manual_telemetry_example(): + """Example with manual telemetry instrumentation.""" + print("\n=== 
Manual Telemetry Example ===\n") + + # Enable telemetry + telemetry = enable_telemetry(backend="custom") # Use logging backend + + if not telemetry: + return + + # Create agent without automatic instrumentation + agent = Agent( + name="Assistant", + role="General assistant", + goal="Help with various tasks", + backstory="You are a helpful AI assistant", + llm="gpt-4o-mini" + ) + + # Manual telemetry - trace custom operations + with telemetry.trace_agent_execution("Assistant", custom_field="demo"): + response = agent.chat("What is the capital of France?") + print(f"Response: {response}") + + # Use telemetry decorator for functions + @telemetry.trace(span_type="custom_operation") + def process_data(data): + """Process some data with telemetry.""" + # Simulate processing + import time + time.sleep(0.1) + + # Record custom metrics + telemetry.record_metric("data.processed", len(data), {"type": "text"}) + + return data.upper() + + # Call traced function + result = process_data("hello world") + print(f"Processed: {result}") + + # Manual event recording + telemetry._backend.record_event("custom_event", { + "user_action": "manual_example", + "success": True + }) + + disable_telemetry() + + +def advanced_telemetry_example(): + """Advanced example with custom telemetry backend and OTLP export.""" + print("\n=== Advanced Telemetry Example ===\n") + + # For OTLP export, you would typically have an OTLP collector running + # For this example, we'll use console export + + # Enable telemetry with OTLP configuration + telemetry = enable_telemetry( + backend="opentelemetry", + service_name="praisonai-production", + service_version="1.0.0", + exporter="otlp", # Would export to OTLP collector + otlp_endpoint="localhost:4317", # OTLP collector endpoint + metric_export_interval=10000 # Export metrics every 10 seconds + ) + + if not telemetry: + print("Telemetry not available. 
Using fallback.") + # Even without telemetry, the code still works + agent = Agent( + name="Analyst", + role="Data analyst", + goal="Analyze data and provide insights", + backstory="You are an experienced data analyst", + llm="gpt-4o-mini" + ) + + result = agent.chat("Analyze the trend: [10, 15, 13, 18, 22, 20, 25]") + print(f"Analysis: {result}") + return + + # Create instrumented agent + agent = Agent( + name="Analyst", + role="Data analyst", + goal="Analyze data and provide insights", + backstory="You are an experienced data analyst", + llm="gpt-4o-mini" + ) + + # Use context manager for grouped operations + with telemetry.trace("analysis_workflow", workflow_type="data_analysis"): + # Multiple operations within the same trace context + response1 = agent.chat("What is the mean of: [10, 15, 13, 18, 22, 20, 25]") + response2 = agent.chat("What is the trend in this data?") + response3 = agent.chat("Predict the next 3 values") + + # Record custom metrics + telemetry.record_metric("analysis.steps", 3) + telemetry.record_cost(0.001, model="gpt-4o-mini") # Track costs + + print("\nAnalysis complete. 
Telemetry data exported to OTLP collector.") + + disable_telemetry() + + +def telemetry_with_errors_example(): + """Example showing how telemetry handles errors.""" + print("\n=== Telemetry with Error Handling ===\n") + + telemetry = enable_telemetry() + + if not telemetry: + return + + agent = Agent( + name="ErrorProneAgent", + role="Test agent", + goal="Test error handling", + backstory="An agent for testing", + llm="gpt-4o-mini" + ) + + # This will be traced and errors will be recorded + try: + with telemetry.trace_agent_execution("ErrorProneAgent"): + # Simulate an error by using an invalid tool + agent.execute_tool("non_existent_tool", "test") + except Exception as e: + print(f"Caught error: {e}") + print("Error was recorded in telemetry") + + # Check error metrics + metrics = telemetry.get_metrics() + print(f"\nError count in telemetry: {metrics['errors']}") + + disable_telemetry() + + +if __name__ == "__main__": + print("PraisonAI Agents Telemetry Examples") + print("===================================") + + # Run basic example + basic_telemetry_example() + + # Run manual instrumentation example + manual_telemetry_example() + + # Run advanced example (commented out as it requires OTLP collector) + # advanced_telemetry_example() + + # Run error handling example + telemetry_with_errors_example() + + print("\n✅ All telemetry examples completed!") \ No newline at end of file From f2a6cebdfb55461b7fea6754296321f1d1c3b8e9 Mon Sep 17 00:00:00 2001 From: MervinPraison Date: Sat, 7 Jun 2025 00:24:28 +0100 Subject: [PATCH 2/3] Disable litellm telemetry across multiple modules for improved performance and consistency - Added environment variable to disable litellm telemetry in __init__.py, llm.py, and memory.py. - Ensured telemetry is disabled after importing litellm to prevent unnecessary data collection. This change enhances control over telemetry features while maintaining existing functionality. 
--- .../praisonaiagents/llm/__init__.py | 11 ++++ .../praisonaiagents/llm/llm.py | 6 +++ .../praisonaiagents/memory/memory.py | 4 ++ .../test_litellm_telemetry.py | 45 ++++++++++++++++ .../test_telemetry_disabled.py | 52 +++++++++++++++++++ 5 files changed, 118 insertions(+) create mode 100644 src/praisonai-agents/test_litellm_telemetry.py create mode 100644 src/praisonai-agents/test_telemetry_disabled.py diff --git a/src/praisonai-agents/praisonaiagents/llm/__init__.py b/src/praisonai-agents/praisonaiagents/llm/__init__.py index 35215a297..b8ac42f00 100644 --- a/src/praisonai-agents/praisonaiagents/llm/__init__.py +++ b/src/praisonai-agents/praisonaiagents/llm/__init__.py @@ -1,5 +1,9 @@ import logging import warnings +import os + +# Disable litellm telemetry before any imports +os.environ["LITELLM_TELEMETRY"] = "False" # Suppress all relevant logs at module level logging.getLogger("litellm").setLevel(logging.ERROR) @@ -17,4 +21,11 @@ # Import after suppressing warnings from .llm import LLM, LLMContextLengthExceededException +# Ensure telemetry is disabled after import as well +try: + import litellm + litellm.telemetry = False +except ImportError: + pass + __all__ = ["LLM", "LLMContextLengthExceededException"] diff --git a/src/praisonai-agents/praisonaiagents/llm/llm.py b/src/praisonai-agents/praisonaiagents/llm/llm.py index 480cadddd..78aab5034 100644 --- a/src/praisonai-agents/praisonaiagents/llm/llm.py +++ b/src/praisonai-agents/praisonaiagents/llm/llm.py @@ -17,6 +17,9 @@ from rich.console import Console from rich.live import Live +# Disable litellm telemetry before any imports +os.environ["LITELLM_TELEMETRY"] = "False" + # TODO: Include in-build tool calling in LLM class # TODO: Restructure so that duplicate calls are not made (Sync with agent.py) class LLMContextLengthExceededException(Exception): @@ -108,6 +111,9 @@ def __init__( ): try: import litellm + # Disable telemetry + litellm.telemetry = False + # Set litellm options globally litellm.set_verbose = 
False litellm.success_callback = [] diff --git a/src/praisonai-agents/praisonaiagents/memory/memory.py b/src/praisonai-agents/praisonaiagents/memory/memory.py index 1218c4f60..75a034943 100644 --- a/src/praisonai-agents/praisonaiagents/memory/memory.py +++ b/src/praisonai-agents/praisonaiagents/memory/memory.py @@ -6,6 +6,9 @@ from typing import Any, Dict, List, Optional, Union, Literal import logging +# Disable litellm telemetry before any imports +os.environ["LITELLM_TELEMETRY"] = "False" + # Set up logger logger = logging.getLogger(__name__) @@ -31,6 +34,7 @@ try: import litellm + litellm.telemetry = False # Disable telemetry LITELLM_AVAILABLE = True except ImportError: LITELLM_AVAILABLE = False diff --git a/src/praisonai-agents/test_litellm_telemetry.py b/src/praisonai-agents/test_litellm_telemetry.py new file mode 100644 index 000000000..f162d4d4a --- /dev/null +++ b/src/praisonai-agents/test_litellm_telemetry.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +"""Test script to identify litellm telemetry behavior""" + +import os +import sys + +# Disable litellm telemetry before importing +os.environ["LITELLM_TELEMETRY"] = "False" +os.environ["LITELLM_LOG"] = "ERROR" + +print("Environment variables set:") +print(f"LITELLM_TELEMETRY: {os.environ.get('LITELLM_TELEMETRY')}") +print(f"LITELLM_LOG: {os.environ.get('LITELLM_LOG')}") + +print("\nImporting litellm...") +import litellm + +print(f"\nChecking litellm telemetry status:") +print(f"Has telemetry attribute: {hasattr(litellm, 'telemetry')}") +if hasattr(litellm, 'telemetry'): + print(f"Telemetry value: {litellm.telemetry}") + +# Try to disable telemetry programmatically +if hasattr(litellm, 'telemetry'): + litellm.telemetry = False + print(f"\nTelemetry disabled programmatically") + +# Check callbacks +print(f"\nCallbacks: {litellm.callbacks}") +print(f"Success callbacks: {litellm.success_callback}") +print(f"Async success callbacks: {litellm._async_success_callback}") + +# Test a simple completion 
+print("\n\nTesting completion (this might trigger telemetry)...") +try: + response = litellm.completion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Say hi"}], + mock_response="Hi there!" + ) + print("Completion successful") +except Exception as e: + print(f"Error: {e}") + +print("\nDone. Check if any network requests were made to BerriAI/litellm") \ No newline at end of file diff --git a/src/praisonai-agents/test_telemetry_disabled.py b/src/praisonai-agents/test_telemetry_disabled.py new file mode 100644 index 000000000..c5926b370 --- /dev/null +++ b/src/praisonai-agents/test_telemetry_disabled.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +"""Test to verify litellm telemetry is properly disabled""" + +import sys +import os + +print("Testing litellm telemetry is disabled...") + +# Test 1: Check environment variable is set +print(f"\n1. Environment variable LITELLM_TELEMETRY: {os.environ.get('LITELLM_TELEMETRY', 'NOT SET')}") + +# Test 2: Import praisonaiagents and check if telemetry is disabled +try: + from praisonaiagents.llm import LLM + print("2. Successfully imported LLM from praisonaiagents") + + # Check if litellm was imported and telemetry is disabled + import litellm + print(f"3. litellm.telemetry = {litellm.telemetry}") + + # Test 3: Create an LLM instance + llm = LLM(model="gpt-3.5-turbo") + print("4. Successfully created LLM instance") + + # Check telemetry again after instance creation + print(f"5. After LLM creation, litellm.telemetry = {litellm.telemetry}") + + # Test 4: Try a mock completion + print("\n6. Testing mock completion...") + response = litellm.completion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "test"}], + mock_response="test response" + ) + print(" Mock completion successful") + + # Final check + print(f"\n7. 
Final check: litellm.telemetry = {litellm.telemetry}") + + if litellm.telemetry == False: + print("\n✅ SUCCESS: Telemetry is properly disabled!") + else: + print("\n❌ FAILURE: Telemetry is still enabled!") + sys.exit(1) + +except Exception as e: + print(f"\n❌ ERROR: {e}") + import traceback + traceback.print_exc() + sys.exit(1) + +print("\nAll tests passed! Telemetry should be disabled.") \ No newline at end of file From 79ab958f350f13dcc7711a44b7c2364554489320 Mon Sep 17 00:00:00 2001 From: MervinPraison Date: Sat, 7 Jun 2025 01:27:29 +0100 Subject: [PATCH 3/3] Update PraisonAI and dependencies to version 2.2.31 and praisonaiagents to version 0.0.104 - Updated PraisonAI version in Dockerfiles and Ruby formula. - Adjusted dependency versions in pyproject.toml and uv.lock for consistency. - Enhanced README to reflect the new versioning. This change ensures compatibility with the latest features and improvements while maintaining existing functionality. --- docker/Dockerfile | 2 +- docker/Dockerfile.chat | 2 +- docker/Dockerfile.dev | 2 +- docker/Dockerfile.ui | 2 +- docker/README.md | 4 +- src/praisonai-agents/pyproject.toml | 10 +- src/praisonai-agents/uv.lock | 182 +++++++++++++--------------- src/praisonai/praisonai.rb | 4 +- src/praisonai/praisonai/deploy.py | 2 +- src/praisonai/pyproject.toml | 8 +- src/praisonai/uv.lock | 11 +- 11 files changed, 111 insertions(+), 118 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index a66355dfc..22a2fa27b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison # Install Python packages (using latest versions) RUN pip install --no-cache-dir \ flask \ - "praisonai>=2.2.30" \ + "praisonai>=2.2.31" \ "praisonai[api]" \ gunicorn \ markdown diff --git a/docker/Dockerfile.chat b/docker/Dockerfile.chat index 3e3ea1d6d..8c21c350e 100644 --- a/docker/Dockerfile.chat +++ b/docker/Dockerfile.chat @@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison # Install Python packages (using 
latest versions) RUN pip install --no-cache-dir \ praisonai_tools \ - "praisonai>=2.2.30" \ + "praisonai>=2.2.31" \ "praisonai[chat]" \ "embedchain[github,youtube]" diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index 1538c9086..077ddbb59 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison # Install Python packages (using latest versions) RUN pip install --no-cache-dir \ praisonai_tools \ - "praisonai>=2.2.30" \ + "praisonai>=2.2.31" \ "praisonai[ui]" \ "praisonai[chat]" \ "praisonai[realtime]" \ diff --git a/docker/Dockerfile.ui b/docker/Dockerfile.ui index fcf7d6e46..3a2b8c71a 100644 --- a/docker/Dockerfile.ui +++ b/docker/Dockerfile.ui @@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison # Install Python packages (using latest versions) RUN pip install --no-cache-dir \ praisonai_tools \ - "praisonai>=2.2.30" \ + "praisonai>=2.2.31" \ "praisonai[ui]" \ "praisonai[crewai]" diff --git a/docker/README.md b/docker/README.md index da018bf1a..99884ce3f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -121,7 +121,7 @@ healthcheck: ## 📦 Package Versions All Docker images use consistent, up-to-date versions: -- PraisonAI: `>=2.2.30` +- PraisonAI: `>=2.2.31` - PraisonAI Agents: `>=0.0.92` - Python: `3.11-slim` @@ -218,7 +218,7 @@ docker-compose up -d ### Version Pinning To use specific versions, update the Dockerfile: ```dockerfile -RUN pip install "praisonai==2.2.30" "praisonaiagents==0.0.92" +RUN pip install "praisonai==2.2.31" "praisonaiagents==0.0.92" ``` ## 🌐 Production Deployment diff --git a/src/praisonai-agents/pyproject.toml b/src/praisonai-agents/pyproject.toml index b709731d9..ea87482a8 100644 --- a/src/praisonai-agents/pyproject.toml +++ b/src/praisonai-agents/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "praisonaiagents" -version = "0.0.102" +version = "0.0.104" description = "Praison AI agents for completing complex tasks with Self Reflection 
Agents" requires-python = ">=3.10" authors = [ @@ -15,7 +15,7 @@ dependencies = [ "rich", "openai", "mcp>=1.6.0", - "posthog>=4.0.0" + "posthog>=3.0.0" ] [project.optional-dependencies] @@ -27,7 +27,7 @@ mcp = [ memory = [ "chromadb>=1.0.0", - "litellm>=1.50.0", + "litellm>=1.72.0", ] knowledge = [ @@ -45,7 +45,7 @@ graph = [ # Add LLM dependencies llm = [ - "litellm>=1.50.0", + "litellm>=1.72.0", "pydantic>=2.4.2" ] @@ -57,7 +57,7 @@ api = [ # Telemetry dependencies telemetry = [ - "posthog>=4.0.0" + "posthog>=3.0.0" ] # Combined features diff --git a/src/praisonai-agents/uv.lock b/src/praisonai-agents/uv.lock index 57816d499..350feae99 100644 --- a/src/praisonai-agents/uv.lock +++ b/src/praisonai-agents/uv.lock @@ -585,19 +585,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/9d/37e5da7519de7b0b070a3fedd4230fe76d50d2a21403e0f2153d70ac4163/cryptography-44.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:62901fb618f74d7d81bf408c8719e9ec14d863086efe4185afd07c352aee1d2c", size = 3128774 }, ] -[[package]] -name = "dataclasses-json" -version = "0.6.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "marshmallow" }, - { name = "typing-inspect" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686 }, -] - [[package]] name = "defusedxml" version = "0.7.1" @@ -1197,6 +1184,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/91/61/c80ef80ed8a0a21158e289ef70dac01e351d929a1c30cb0f49be60772547/jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = 
"sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566", size = 202374 }, ] +[[package]] +name = "json-repair" +version = "0.39.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/60/6d1599bc01070d9fe3840d245ae80fd24b981c732d962842825ce7a9fde6/json_repair-0.39.1.tar.gz", hash = "sha256:e90a489f247e1a8fc86612a5c719872a3dbf9cbaffd6d55f238ec571a77740fa", size = 30040 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/b9/2e445481555422b907dab468b53574bc1e995099ca1a1201d0d876ca05e9/json_repair-0.39.1-py3-none-any.whl", hash = "sha256:3001409a2f319249f13e13d6c622117a5b70ea7e0c6f43864a0233cdffc3a599", size = 20686 }, +] + [[package]] name = "jsonpatch" version = "1.33" @@ -1286,29 +1282,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ed/5c/5c0be747261e1f8129b875fa3bfea736bc5fe17652f9d5e15ca118571b6f/langchain-0.3.25-py3-none-any.whl", hash = "sha256:931f7d2d1eaf182f9f41c5e3272859cfe7f94fc1f7cef6b3e5a46024b4884c21", size = 1011008 }, ] -[[package]] -name = "langchain-community" -version = "0.3.24" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "dataclasses-json" }, - { name = "httpx-sse" }, - { name = "langchain" }, - { name = "langchain-core" }, - { name = "langsmith" }, - { name = "numpy" }, - { name = "pydantic-settings" }, - { name = "pyyaml" }, - { name = "requests" }, - { name = "sqlalchemy" }, - { name = "tenacity" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/01/f6/4892d1f1cf6d3e89da6ee6cfb0eb82b908c706c58bde7df28367ee76a93f/langchain_community-0.3.24.tar.gz", hash = "sha256:62d9e8cf9aadf35182ec3925f9ec1c8e5e84fb4f199f67a01aee496d289dc264", size = 33233643 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/cb/582f22d74d69f4dbd41e98d361ee36922b79a245a9411383327bd4b63747/langchain_community-0.3.24-py3-none-any.whl", hash = 
"sha256:b6cdb376bf1c2f4d2503aca20f8f35f2d5b3d879c52848277f20ce1950e7afaf", size = 2528335 }, -] - [[package]] name = "langchain-core" version = "0.3.63" @@ -1327,6 +1300,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/71/a748861e6a69ab6ef50ab8e65120422a1f36245c71a0dd0f02de49c208e1/langchain_core-0.3.63-py3-none-any.whl", hash = "sha256:f91db8221b1bc6808f70b2e72fded1a94d50ee3f1dff1636fb5a5a514c64b7f5", size = 438468 }, ] +[[package]] +name = "langchain-neo4j" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain" }, + { name = "langchain-core" }, + { name = "neo4j" }, + { name = "neo4j-graphrag" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f9/29/b1c485eaf5adffc59d97d2a47e9b5f3fcfc58becb0e92ded197b98c03138/langchain_neo4j-0.4.0.tar.gz", hash = "sha256:3f059a66411cec1062a2b8c44953a70d0fff9e123e9fb1d6b3f17a0bef6d6114", size = 27061 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7e/d782a77ed3f561a466410aa384f93f59e33930e3d64686d0e1dac4d598c5/langchain_neo4j-0.4.0-py3-none-any.whl", hash = "sha256:2760b5757e7a402884cf3419830217651df97fe4f44b3fec6c96b14b6d7fd18e", size = 31349 }, +] + [[package]] name = "langchain-text-splitters" version = "0.3.8" @@ -1359,7 +1347,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.59.1" +version = "1.72.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -1374,9 +1362,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/92/98/80766e2820e140b4b03a4b3e997d25e522bc2ce1d3c79c98a7b856a84e1f/litellm-1.59.1.tar.gz", hash = "sha256:ca4fd7789cc493daa63fe0dd69e3351237708c48a9305ccbc685a1bcf9eecf98", size = 6374267 } +sdist = { url = "https://files.pythonhosted.org/packages/d0/a7/67da5f515c3847bd121b5fde54ac48ee3e7d55d75c49d2b51a2f9b20ab7a/litellm-1.72.1.tar.gz", hash = 
"sha256:d5d822988d62db8fe434aca7584e1cf8f5a03c033e957e3b5c1ca24366840951", size = 8095871 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/3c/d2edad7c3cc85d83a4a6929c80ee258ae8875e2366366b9ab93eaf817cb3/litellm-1.59.1-py3-none-any.whl", hash = "sha256:69adf83f6942b9b62d3398cd8c1dc2e53b02e9b97bc5c45c427ed917629048e7", size = 6661803 }, + { url = "https://files.pythonhosted.org/packages/10/2d/48dee9e0cc782a74d7361a3a1acf70d9ac8a9278a4fa8d2dcf2ac7359ed8/litellm-1.72.1-py3-none-any.whl", hash = "sha256:858b4458f199dd8d3acd46436cf5fb1de19404e5dcf92ff59ec417c224b52481", size = 7994774 }, ] [[package]] @@ -1608,18 +1596,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, ] -[[package]] -name = "marshmallow" -version = "3.26.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "packaging" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878 }, -] - [[package]] name = "mcp" version = "1.6.0" @@ -1650,7 +1626,7 @@ wheels = [ [[package]] name = "mem0ai" -version = "0.1.44" +version = "0.1.104" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "openai" }, @@ -1660,14 +1636,14 @@ dependencies = [ { name = "qdrant-client" }, { name = "sqlalchemy" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/81/e3/46e695de8b7d1dc3f922425b01797c1bd3ee63bde449f22d0f1f10bd9df9/mem0ai-0.1.44.tar.gz", hash = "sha256:93214272915d94f673d370bb8fe7a8bfc21806267e65700b471bec454dcdfa5c", size = 63273 } +sdist = { url = "https://files.pythonhosted.org/packages/49/d3/d2bbab0e505be71794bb5d53a67c740cf0ed8187d3965d68e317d7f3b208/mem0ai-0.1.104.tar.gz", hash = "sha256:4193bc2a2d5e9e299f3efb5ad80a9f3296e343b20e24cf2326c1ac7efdcfc773", size = 100245 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/37/96382bf64375237d1fed98813d19aba725e8089e1044b7a0873ddf74576b/mem0ai-0.1.44-py3-none-any.whl", hash = "sha256:32260a2cd935035a1b16ce04ad2e4510a5bd97618709466e2d06303e0eb8d9d4", size = 89595 }, + { url = "https://files.pythonhosted.org/packages/da/03/818b01ad786aeb719ca73408c9ceae9e8193bc1a6aaba86c11c8fdd002b8/mem0ai-0.1.104-py3-none-any.whl", hash = "sha256:7c1f7dd11dcfa8ce827a2911a9dceee98ec8e149a5454e3f8673504832a9186d", size = 156132 }, ] [package.optional-dependencies] graph = [ - { name = "langchain-community" }, + { name = "langchain-neo4j" }, { name = "neo4j" }, { name = "rank-bm25" }, ] @@ -1744,15 +1720,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9d/d3/f7e6d7d062b8d7072c3989a528d9d47486ee5d5ae75250f6e26b4976d098/mmh3-5.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:122fa9ec148383f9124292962bda745f192b47bfd470b2af5fe7bb3982b17896", size = 36539 }, ] -[[package]] -name = "monotonic" -version = "1.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7", size = 7615 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl", hash = 
"sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c", size = 8154 }, -] - [[package]] name = "mpmath" version = "1.3.0" @@ -1860,15 +1827,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, ] -[[package]] -name = "mypy-extensions" -version = "1.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963 }, -] - [[package]] name = "neo4j" version = "5.28.1" @@ -1881,6 +1839,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6a/57/94225fe5e9dabdc0ff60c88cbfcedf11277f4b34e7ab1373d3e62dbdd207/neo4j-5.28.1-py3-none-any.whl", hash = "sha256:6755ef9e5f4e14b403aef1138fb6315b120631a0075c138b5ddb2a06b87b09fd", size = 312258 }, ] +[[package]] +name = "neo4j-graphrag" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fsspec" }, + { name = "json-repair" }, + { name = "neo4j" }, + { name = "pydantic" }, + { name = "pypdf" }, + { name = "pyyaml" }, + { name = "types-pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/5a/b9309d488b7cfe63326b6502b12dfae16558d64ead4d007b5e6392dd36ac/neo4j_graphrag-1.7.0.tar.gz", hash = "sha256:c6c1f730e680d8af3ff3e1ee6086c465c15dd3245157e6c40606ac3ddae7d4f0", size = 104306 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/98/2a/b9e29d7a1068302f94dc05d682f1068795947f6c0a99d8274e7f1ca29d23/neo4j_graphrag-1.7.0-py3-none-any.whl", hash = "sha256:29a854f2f1e268f043446cdd387c72ee954b87726329ef6479c59ed7b9cf0751", size = 180365 }, +] + [[package]] name = "numpy" version = "2.2.1" @@ -1999,7 +1975,7 @@ wheels = [ [[package]] name = "openai" -version = "1.58.1" +version = "1.84.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2011,9 +1987,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/27/3c/b1ecce430ed56fa3ac1b0676966d3250aab9c70a408232b71e419ea62148/openai-1.58.1.tar.gz", hash = "sha256:f5a035fd01e141fc743f4b0e02c41ca49be8fab0866d3b67f5f29b4f4d3c0973", size = 343411 } +sdist = { url = "https://files.pythonhosted.org/packages/91/a3/128caf24e116f48fad3e4d5122cdf84db06c5127911849d51663c66158c8/openai-1.84.0.tar.gz", hash = "sha256:4caa43bdab262cc75680ce1a2322cfc01626204074f7e8d9939ab372acf61698", size = 467066 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/5a/d22cd07f1a99b9e8b3c92ee0c1959188db4318828a3d88c9daac120bdd69/openai-1.58.1-py3-none-any.whl", hash = "sha256:e2910b1170a6b7f88ef491ac3a42c387f08bd3db533411f7ee391d166571d63c", size = 454279 }, + { url = "https://files.pythonhosted.org/packages/2a/10/f245db006a860dbc1f2e2c8382e0a1762c7753e7971ba43a1dc3f3ec1404/openai-1.84.0-py3-none-any.whl", hash = "sha256:7ec4436c3c933d68dc0f5a0cef0cb3dbc0864a54d62bddaf2ed5f3d521844711", size = 725512 }, ] [[package]] @@ -2382,27 +2358,28 @@ wheels = [ [[package]] name = "posthog" -version = "3.7.5" +version = "4.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backoff" }, - { name = "monotonic" }, + { name = "distro" }, { name = "python-dateutil" }, { name = "requests" }, { name = "six" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/58/e9/1cd7492bb58dd255129467e1221e2d6f51aa0c6f3c781ac9ac29cc8a2859/posthog-3.7.5.tar.gz", hash = "sha256:8ba40ab623da35db72715fc87fe7dccb7fc272ced92581fe31db2d4dbe7ad761", size = 50269 } +sdist = { url = "https://files.pythonhosted.org/packages/89/df/cb1b1837a38dfde0eca9b7947037ef0abf9e7732a7c29b8eab6cd4af1a0b/posthog-4.3.2.tar.gz", hash = "sha256:1dc422ccef560ee0949b00e340e8135160fd71a4eb03f06ec382c4672ab0fe4c", size = 85399 } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/bd/2d550ac79443cdbb6a5a4193c37820f04df0499e1ecbe8e41c5462cf0c2d/posthog-3.7.5-py2.py3-none-any.whl", hash = "sha256:022132c17069dde03c5c5904e2ae1b9bd68d5059cbc5a8dffc5c1537a1b71cb5", size = 54882 }, + { url = "https://files.pythonhosted.org/packages/2e/e7/2983efeec4356cbae0fcd775effc76576377ae86330d25c150a5bbf8fa4c/posthog-4.3.2-py2.py3-none-any.whl", hash = "sha256:e53d0e20cc8d4ec9cb62f0cbf6034e8f93e7f5b3fc502dfcc8813feb12a06bb7", size = 102036 }, ] [[package]] name = "praisonaiagents" -version = "0.0.102" +version = "0.0.104" source = { editable = "." 
} dependencies = [ { name = "mcp" }, { name = "openai" }, + { name = "posthog" }, { name = "pydantic" }, { name = "rich" }, ] @@ -2416,6 +2393,7 @@ all = [ { name = "markitdown", extra = ["all"] }, { name = "mcp" }, { name = "mem0ai", extra = ["graph"] }, + { name = "posthog" }, { name = "pydantic" }, { name = "uvicorn" }, ] @@ -2446,6 +2424,9 @@ memory = [ { name = "chromadb" }, { name = "litellm" }, ] +telemetry = [ + { name = "posthog" }, +] [package.metadata] requires-dist = [ @@ -2455,20 +2436,23 @@ requires-dist = [ { name = "chromadb", marker = "extra == 'memory'", specifier = ">=1.0.0" }, { name = "fastapi", marker = "extra == 'api'", specifier = ">=0.115.0" }, { name = "fastapi", marker = "extra == 'mcp'", specifier = ">=0.115.0" }, - { name = "litellm", marker = "extra == 'llm'", specifier = ">=1.50.0" }, - { name = "litellm", marker = "extra == 'memory'", specifier = ">=1.50.0" }, + { name = "litellm", marker = "extra == 'llm'", specifier = ">=1.72.0" }, + { name = "litellm", marker = "extra == 'memory'", specifier = ">=1.72.0" }, { name = "markitdown", extras = ["all"], marker = "extra == 'knowledge'", specifier = ">=0.1.0" }, { name = "mcp", specifier = ">=1.6.0" }, { name = "mcp", marker = "extra == 'mcp'", specifier = ">=1.6.0" }, { name = "mem0ai", marker = "extra == 'knowledge'", specifier = ">=0.1.0" }, { name = "mem0ai", extras = ["graph"], marker = "extra == 'graph'", specifier = ">=0.1.0" }, { name = "openai" }, + { name = "posthog", specifier = ">=3.0.0" }, + { name = "posthog", marker = "extra == 'telemetry'", specifier = ">=3.0.0" }, { name = "praisonaiagents", extras = ["api"], marker = "extra == 'all'" }, { name = "praisonaiagents", extras = ["graph"], marker = "extra == 'all'" }, { name = "praisonaiagents", extras = ["knowledge"], marker = "extra == 'all'" }, { name = "praisonaiagents", extras = ["llm"], marker = "extra == 'all'" }, { name = "praisonaiagents", extras = ["mcp"], marker = "extra == 'all'" }, { name = "praisonaiagents", 
extras = ["memory"], marker = "extra == 'all'" }, + { name = "praisonaiagents", extras = ["telemetry"], marker = "extra == 'all'" }, { name = "pydantic" }, { name = "pydantic", marker = "extra == 'llm'", specifier = ">=2.4.2" }, { name = "rich" }, @@ -2727,6 +2711,18 @@ crypto = [ { name = "cryptography" }, ] +[[package]] +name = "pypdf" +version = "5.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/46/67de1d7a65412aa1c896e6b280829b70b57d203fadae6859b690006b8e0a/pypdf-5.6.0.tar.gz", hash = "sha256:a4b6538b77fc796622000db7127e4e58039ec5e6afd292f8e9bf42e2e985a749", size = 5023749 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/8b/dc3a72d98c22be7a4cbd664ad14c5a3e6295c2dbdf572865ed61e24b5e38/pypdf-5.6.0-py3-none-any.whl", hash = "sha256:ca6bf446bfb0a2d8d71d6d6bb860798d864c36a29b3d9ae8d7fc7958c59f88e7", size = 304208 }, +] + [[package]] name = "pypika" version = "0.48.9" @@ -3464,25 +3460,21 @@ wheels = [ ] [[package]] -name = "typing-extensions" -version = "4.12.2" +name = "types-pyyaml" +version = "6.0.12.20250516" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +sdist = { url = "https://files.pythonhosted.org/packages/4e/22/59e2aeb48ceeee1f7cd4537db9568df80d62bdb44a7f9e743502ea8aab9c/types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba", size = 17378 } wheels = [ - { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = 
"sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, + { url = "https://files.pythonhosted.org/packages/99/5f/e0af6f7f6a260d9af67e1db4f54d732abad514252a7a378a6c4d17dd1036/types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530", size = 20312 }, ] [[package]] -name = "typing-inspect" -version = "0.9.0" +name = "typing-extensions" +version = "4.12.2" source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mypy-extensions" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825 } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } wheels = [ - { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827 }, + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, ] [[package]] diff --git a/src/praisonai/praisonai.rb b/src/praisonai/praisonai.rb index de283c54e..4a94dbb7c 100644 --- a/src/praisonai/praisonai.rb +++ b/src/praisonai/praisonai.rb @@ -3,8 +3,8 @@ class Praisonai < Formula desc "AI tools for various AI applications" homepage "https://github.com/MervinPraison/PraisonAI" - url 
"https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.30.tar.gz" - sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.30.tar.gz | shasum -a 256`.split.first + url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.31.tar.gz" + sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.31.tar.gz | shasum -a 256`.split.first license "MIT" depends_on "python@3.11" diff --git a/src/praisonai/praisonai/deploy.py b/src/praisonai/praisonai/deploy.py index 9ac7d426a..1953ebec5 100644 --- a/src/praisonai/praisonai/deploy.py +++ b/src/praisonai/praisonai/deploy.py @@ -56,7 +56,7 @@ def create_dockerfile(self): file.write("FROM python:3.11-slim\n") file.write("WORKDIR /app\n") file.write("COPY . .\n") - file.write("RUN pip install flask praisonai==2.2.30 gunicorn markdown\n") + file.write("RUN pip install flask praisonai==2.2.31 gunicorn markdown\n") file.write("EXPOSE 8080\n") file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n') diff --git a/src/praisonai/pyproject.toml b/src/praisonai/pyproject.toml index 60ff0defb..5a69c56e3 100644 --- a/src/praisonai/pyproject.toml +++ b/src/praisonai/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "PraisonAI" -version = "2.2.30" +version = "2.2.31" description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration." 
readme = "README.md" license = "" @@ -12,7 +12,7 @@ dependencies = [ "rich>=13.7", "markdown>=3.5", "pyparsing>=3.0.0", - "praisonaiagents>=0.0.102", + "praisonaiagents>=0.0.104", "python-dotenv>=0.19.0", "instructor>=1.3.3", "PyYAML>=6.0", @@ -95,7 +95,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.15", "crewai"] [tool.poetry] name = "PraisonAI" -version = "2.2.30" +version = "2.2.31" description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration." authors = ["Mervin Praison"] license = "" @@ -113,7 +113,7 @@ python = ">=3.10,<3.13" rich = ">=13.7" markdown = ">=3.5" pyparsing = ">=3.0.0" -praisonaiagents = ">=0.0.102" +praisonaiagents = ">=0.0.104" python-dotenv = ">=0.19.0" instructor = ">=1.3.3" PyYAML = ">=6.0" diff --git a/src/praisonai/uv.lock b/src/praisonai/uv.lock index 0aed76876..783b513eb 100644 --- a/src/praisonai/uv.lock +++ b/src/praisonai/uv.lock @@ -3931,7 +3931,7 @@ wheels = [ [[package]] name = "praisonai" -version = "2.2.30" +version = "2.2.31" source = { editable = "." 
} dependencies = [ { name = "instructor" }, @@ -4073,7 +4073,7 @@ requires-dist = [ { name = "plotly", marker = "extra == 'realtime'", specifier = ">=5.24.0" }, { name = "praisonai-tools", marker = "extra == 'autogen'", specifier = ">=0.0.15" }, { name = "praisonai-tools", marker = "extra == 'crewai'", specifier = ">=0.0.15" }, - { name = "praisonaiagents", specifier = ">=0.0.102" }, + { name = "praisonaiagents", specifier = ">=0.0.104" }, { name = "pyautogen", marker = "extra == 'autogen'", specifier = ">=0.2.19" }, { name = "pydantic", marker = "extra == 'chat'", specifier = "<=2.10.1" }, { name = "pydantic", marker = "extra == 'code'", specifier = "<=2.10.1" }, @@ -4130,17 +4130,18 @@ wheels = [ [[package]] name = "praisonaiagents" -version = "0.0.102" +version = "0.0.104" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mcp" }, { name = "openai" }, + { name = "posthog" }, { name = "pydantic" }, { name = "rich" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/19/e5/6f89d32b022f86561a9ffd5ceba5e8bcd29a531396c19a591ac12efdbf3b/praisonaiagents-0.0.102.tar.gz", hash = "sha256:c51ef93663a4d46c09c956b34a291fbb2bfefc07f546add587c441f8d801113a", size = 145079 } +sdist = { url = "https://files.pythonhosted.org/packages/d1/e5/f7f0f4a6c2ce517332b0799f6081cc4d3b8016a3a8bc12df61d50004012a/praisonaiagents-0.0.104.tar.gz", hash = "sha256:88239bc9de3f6a6777bac13695d844aa9b3c8968d973e7ee1d3cf31d3ff9ac8b", size = 150329 } wheels = [ - { url = "https://files.pythonhosted.org/packages/0e/e4/2ddc2b3a38e118454ee6c047e08b2f0baea3fcf45fe85cffa19221e79968/praisonaiagents-0.0.102-py3-none-any.whl", hash = "sha256:52dfe9d15a0a0c6d44194e45d91fe513e5633164f3ff6426c41068c302c5ff7a", size = 165203 }, + { url = "https://files.pythonhosted.org/packages/54/53/34c367bf701082690194cc823c4e36784efd24fa07488022906153f48b0e/praisonaiagents-0.0.104-py3-none-any.whl", hash = "sha256:37a0ae97c63aa1cece4f5db67f7cc09b0e3d2b5ea7ba8d021062ee41edd03743", size = 
171834 }, ] [[package]]