Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 25 additions & 14 deletions src/praisonai/praisonai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,26 +27,37 @@
'LocalManagedConfig',
]

# Telemetry initialization state
# Telemetry initialization state - thread-safe
import threading

_telemetry_lock = threading.Lock()
_telemetry_initialized = False

def _ensure_telemetry_defaults() -> None:
"""Apply telemetry env defaults exactly once, on first observability use."""
"""Apply telemetry env defaults exactly once, on first observability use.

Thread-safe implementation using double-checked locking pattern.
"""
global _telemetry_initialized
if _telemetry_initialized:
return
import os
langfuse_configured = bool(
os.getenv("LANGFUSE_PUBLIC_KEY")
or os.path.exists(os.path.expanduser("~/.praisonai/langfuse.env"))
)
if langfuse_configured:
# Explicitly enable OTEL for Langfuse integration
os.environ["OTEL_SDK_DISABLED"] = "false"
else:
os.environ.setdefault("OTEL_SDK_DISABLED", "true")
os.environ.setdefault("EC_TELEMETRY", "false") # respect user overrides
_telemetry_initialized = True

with _telemetry_lock:
if _telemetry_initialized:
return

import os
langfuse_configured = bool(
os.getenv("LANGFUSE_PUBLIC_KEY")
or os.path.exists(os.path.expanduser("~/.praisonai/langfuse.env"))
)
if langfuse_configured:
# Explicitly enable OTEL for Langfuse integration
os.environ["OTEL_SDK_DISABLED"] = "false"
else:
os.environ.setdefault("OTEL_SDK_DISABLED", "true")
os.environ.setdefault("EC_TELEMETRY", "false") # respect user overrides
_telemetry_initialized = True


# Lazy loading for heavy imports
Expand Down
182 changes: 61 additions & 121 deletions src/praisonai/praisonai/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,118 +3,52 @@
PraisonAI CLI — Unified Entry Point.

Single entry point for all CLI invocations.
Routes to Typer-based commands for known subcommands,
falls back to legacy argparse for direct prompts and YAML files.
Makes Typer the single dispatcher, with a narrow legacy shim for bare prompts/YAML.

Design:
- Typer-first: all registered commands auto-discovered via Click
- Legacy fallback: prompts, .yaml paths, and deprecated --flags
- No manual command lists needed — adding a Typer command Just Works
- Typer owns all command resolution
- Legacy shim only for bare prompt/YAML invocations via Typer callback
- Fail loud on registration errors - no silent degradation
"""

import sys


# ---------------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------------

_typer_commands_cache = None


def _get_typer_commands():
"""Auto-discover registered Typer commands via Click introspection.

Returns a set of command names that the Typer app knows about.
This is populated from app.py's register_commands() — no manual
lists to maintain.
def _is_legacy_invocation(argv: list[str]) -> bool:
"""Check if this is a bare prompt or bare YAML invocation.

Legacy invocations are:
- Bare YAML file: "agents.yaml"
- Free-text prompt: "Create a weather app"

All other invocations should be handled by Typer commands.
"""
global _typer_commands_cache
if _typer_commands_cache is not None:
return _typer_commands_cache

try:
from praisonai.cli.app import app, register_commands
register_commands()

import typer.main
import click
click_app = typer.main.get_command(app)
ctx = click.Context(click_app, info_name="praisonai")
_typer_commands_cache = set(click_app.list_commands(ctx))
except Exception:
_typer_commands_cache = set()

return _typer_commands_cache


def _find_first_command(argv):
"""Find the first non-flag argument in argv.

Skips global flags (--json, --verbose, etc.) and their values.
Returns the first positional arg, or None if only flags are present.
"""
# Flags that consume a following value
VALUE_FLAGS = {"--output-format", "-o"}

skip_next = False
for arg in argv:
if skip_next:
skip_next = False
continue
if arg.startswith("-"):
if arg in VALUE_FLAGS:
skip_next = True
continue
return arg # First non-flag arg
return None


def _run_typer(argv):
"""Dispatch to the Typer CLI app."""
from praisonai.cli.app import app, register_commands
register_commands() # idempotent

original = sys.argv
sys.argv = ["praisonai"] + list(argv)
try:
app()
except SystemExit as e:
sys.exit(e.code if isinstance(e.code, int) else 0)
finally:
sys.argv = original
import os

# Only the very first positional token is considered; option values never are.
if not argv or argv[0].startswith("-"):
return False

first = argv[0]

# Check for free-text prompt (contains spaces)
if " " in first:
return True

# Check for YAML file that actually exists on disk
if first.endswith((".yaml", ".yml")) and os.path.isfile(first):
return True

return False
Comment thread
coderabbitai[bot] marked this conversation as resolved.


def _run_legacy(argv):
"""Dispatch to the legacy argparse CLI (prompts, YAML, deprecated flags)."""
from praisonai.cli.main import PraisonAI

original = sys.argv
sys.argv = ["praisonai"] + list(argv)
try:
praison = PraisonAI()
result = praison.main()
code = 0 if result is None else (1 if result is False else 0)
sys.exit(code)
except SystemExit as e:
sys.exit(e.code if isinstance(e.code, int) else 0)
finally:
sys.argv = original


# ---------------------------------------------------------------------------
# Main entry point
# ---------------------------------------------------------------------------

def main():
"""Unified CLI entry point Typer-first, legacy fallback.
"""Unified CLI entry point - Typer is the single dispatcher.

Routing rules (in order):
1. --version / -V → print version and exit
2. --help / -h → Typer help (global or command-level)
3. No arguments → Typer interactive TUI
4. First arg is a Typer cmd→ Typer (auto-discovered from app.py)
5. Everything else → Legacy (prompt, .yaml, deprecated flags)
1. --version / -V → print version and exit
2. Legacy invocation → legacy shim (bare prompts/YAML only)
3. Everything else → Typer (owns all subcommands)
"""
argv = sys.argv[1:]

Expand All @@ -124,30 +58,36 @@ def main():
print(f"PraisonAI version {__version__}")
return

# 2. Help flags → always Typer (global help or command help)
if "--help" in argv or "-h" in argv:
_run_typer(argv)
return

# 3. No arguments → Typer (interactive TUI)
if not argv:
_run_typer(argv)
# 2. Check for legacy invocation (bare prompt/YAML)
if _is_legacy_invocation(argv):
from praisonai.cli.main import PraisonAI
original = sys.argv
sys.argv = ["praisonai"] + list(argv)
try:
praison = PraisonAI()
result = praison.main()
code = 0 if result is None else (1 if result is False else 0)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Description: Check return statements in PraisonAI.main() to verify expected return values.

rg -n -A2 -B2 'def main\(self\)' src/praisonai/praisonai/cli/main.py | head -20
rg -n 'return ' src/praisonai/praisonai/cli/main.py | rg -C2 'class PraisonAI'

Repository: MervinPraison/PraisonAI

Length of output: 233


🏁 Script executed:

# Check the main() method implementation and its return statements
sed -n '328,400p' src/praisonai/praisonai/cli/main.py | cat -n

Repository: MervinPraison/PraisonAI

Length of output: 3959


🏁 Script executed:

# Also check what praison.main() is and how it's used in __main__.py
cat -n src/praisonai/praisonai/__main__.py

Repository: MervinPraison/PraisonAI

Length of output: 3503


🏁 Script executed:

# Find where the main() method ends in cli/main.py
# Start from line 328 and find the next method definition or class end
sed -n '328,600p' src/praisonai/praisonai/cli/main.py | grep -n "^    def \|^class " | head -1

Repository: MervinPraison/PraisonAI

Length of output: 89


🏁 Script executed:

# Get all return statements from the main() method
rg -n "^\s+return" src/praisonai/praisonai/cli/main.py | awk -F: '{if ($1 >= 328) print}'

Repository: MervinPraison/PraisonAI

Length of output: 5179


🏁 Script executed:

# Check the context around string returns to understand intent
sed -n '405,420p' src/praisonai/praisonai/cli/main.py | cat -n
sed -n '730,745p' src/praisonai/praisonai/cli/main.py | cat -n

Repository: MervinPraison/PraisonAI

Length of output: 1991


The exit code logic correctly handles actual return values from praison.main(), despite implicit typing.

The return values from main() are None, False, or success message strings (e.g., "test", "File ... created successfully"). The mapping code = 0 if result is None else (1 if result is False else 0) correctly converts:

  • None → 0 (success)
  • False → 1 (failure)
  • Strings (success messages) → 0 (success)

No True or explicit failure values are returned by main(), so the implicit handling works correctly in practice. Consider adding a type hint or comment documenting the expected return type for clarity.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai/praisonai/__main__.py` at line 69, Add an explicit return type
annotation and a short comment documenting expected return values for
praison.main() (it returns None, False, or success strings) and clarify the
exit-code mapping near the 'code = 0 if result is None else (1 if result is
False else 0)' line; specifically, annotate praison.main as returning
Optional[Union[bool, str]] (or similar) and add a one-line comment above the
'code = ...' assignment that explains None and string => exit 0, False => exit
1.

sys.exit(code)
except SystemExit as e:
sys.exit(e.code if isinstance(e.code, int) else 0)
finally:
sys.argv = original
return

# 4. Find first non-flag argument and check if it's a Typer command
first_cmd = _find_first_command(argv)

if first_cmd is None:
# Only flags, no command → Typer handles global flags
_run_typer(argv)
return

if first_cmd in _get_typer_commands():
# Known Typer command → Typer
_run_typer(argv)
else:
# Prompt, YAML file, or legacy invocation → legacy
_run_legacy(argv)
# 3. All other invocations → Typer (fail loud on registration errors)
from praisonai.cli.app import app, register_commands
# CRITICAL: Fail loud - do not swallow registration exceptions
register_commands() # Let any ImportError/other exceptions propagate

original = sys.argv
sys.argv = ["praisonai"] + list(argv)
try:
app()
except SystemExit as e:
sys.exit(e.code if isinstance(e.code, int) else 0)
finally:
sys.argv = original


if __name__ == "__main__":
Expand Down
39 changes: 26 additions & 13 deletions src/praisonai/praisonai/_async_bridge.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,18 @@ def get(self) -> asyncio.AbstractEventLoop:
)
self._thread.start()
return self._loop

def get_unlocked(self) -> asyncio.AbstractEventLoop:
"""Get loop assuming caller holds _lock. For run_sync() use only."""
if self._loop is None or self._loop.is_closed():
self._loop = asyncio.new_event_loop()
self._thread = threading.Thread(
target=self._loop.run_forever,
name="praisonai-async",
daemon=False,
)
self._thread.start()
return self._loop

def shutdown(self, timeout: float = 5.0) -> None:
with self._lock:
Expand Down Expand Up @@ -63,12 +75,10 @@ async def _cancel_all() -> None:

def run_sync(coro: Awaitable[T], *, timeout: float | None = _DEFAULT_TIMEOUT) -> T:
"""
Run a coroutine synchronously, safe inside a running loop.
Run a coroutine synchronously using the background loop.

This function automatically detects if there's already a running event loop
and handles the execution appropriately:
- If no loop is running: uses background loop (consistent behavior)
- If a loop is running: schedules on background loop (safe path)
IMPORTANT: This function must not be called from within a running event loop;
doing so raises RuntimeError (blocking on the background loop from inside a
running loop risks deadlock). Use 'await coro' directly from async contexts.

Args:
coro: The coroutine to run
Expand All @@ -78,21 +88,24 @@ def run_sync(coro: Awaitable[T], *, timeout: float | None = _DEFAULT_TIMEOUT) ->
The result of the coroutine

Raises:
RuntimeError: If called from within a running event loop
TimeoutError: If timeout is exceeded
Any exception raised by the coroutine
"""
try:
asyncio.get_running_loop()
running = True
except RuntimeError:
running = False
pass
else:
raise RuntimeError(
"run_sync() cannot be called from a running event loop; "
"await the coroutine directly instead."
)

if not running:
# Reuse the background loop instead of creating a new one per call.
fut: Future = asyncio.run_coroutine_threadsafe(coro, _BG.get())
return fut.result(timeout=timeout)

fut = asyncio.run_coroutine_threadsafe(coro, _BG.get())
# Submit the coroutine inside the lock to prevent shutdown races
with _BG._lock:
loop = _BG.get_unlocked()
fut: Future = asyncio.run_coroutine_threadsafe(coro, loop)
return fut.result(timeout=timeout)
Comment on lines 95 to 109
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Find all run_sync call sites to confirm which ones lack a running-loop guard.
rg -nP --type=py -C4 '\brun_sync\s*\(' -g '!**/tests/**' -g '!**/test_*.py'

Repository: MervinPraison/PraisonAI

Length of output: 43640


🏁 Script executed:

#!/bin/bash
# 1. Check surrealdb.py __init__ context and how it's instantiated
echo "=== Checking surrealdb.py __init__ and instantiation ==="
rg -nP --type=py -B10 'class.*SurrealDB.*Conversation' src/praisonai/praisonai/persistence/conversation/surrealdb.py | head -30

# 2. Check agents_generator._run_autogen_v4 context and who calls it
echo -e "\n=== Checking agents_generator._run_autogen_v4 context ==="
rg -nP --type=py -B5 'def _run_autogen_v4' src/praisonai/praisonai/agents_generator.py

# 3. Check if astart() calls _run_autogen_v4
echo -e "\n=== Checking if astart calls _run_autogen_v4 ==="
rg -nP --type=py '_run_autogen_v4\(' src/praisonai/praisonai/agents_generator.py

# 4. Check async_mongodb sync wrapper callers to see if they're called from async
echo -e "\n=== Checking async_mongodb class context ==="
rg -nP --type=py -B10 'class.*AsyncMongoDB' src/praisonai/praisonai/persistence/state/async_mongodb.py | head -20

# 5. See the actual a2u_server pattern for comparison
echo -e "\n=== Checking a2u_server pattern (claimed safe) ==="
rg -nP --type=py -B10 -A5 'def publish_sync' src/praisonai/praisonai/endpoints/a2u_server.py

Repository: MervinPraison/PraisonAI

Length of output: 1846


🏁 Script executed:

#!/bin/bash
# Check what calls _run_autogen_v4
echo "=== Who calls _run_autogen_v4? ==="
rg -nP --type=py '_run_autogen_v4\(' src/praisonai/praisonai/agents_generator.py -A2 -B2

# Check if there's an astart method
echo -e "\n=== Looking for astart method ==="
rg -nP --type=py 'async def astart|def astart' src/praisonai/praisonai/agents_generator.py -A3

# Check the a2u_server pattern in full detail
echo -e "\n=== Full a2u_server.publish_sync implementation (claimed safe) ==="
sed -n '161,190p' src/praisonai/praisonai/endpoints/a2u_server.py

# Verify: does the review mention endpoints/a2u_server.py already following safe pattern?
echo -e "\n=== Review's claim about a2u_server pattern ==="
rg -nP --type=py 'try.*ensure_future|asyncio.ensure_future' src/praisonai/praisonai/endpoints/a2u_server.py -B2 -A3

Repository: MervinPraison/PraisonAI

Length of output: 1710


🏁 Script executed:

#!/bin/bash
# Find all methods/properties that call _run_autogen_v4
echo "=== All references to _run_autogen_v4 (not just definition) ==="
rg -nP --type=py '_run_autogen_v4' src/praisonai/praisonai/agents_generator.py

# Find if there are any async entry points in agents_generator
echo -e "\n=== Async methods in agents_generator ==="
rg -nP --type=py 'async def' src/praisonai/praisonai/agents_generator.py | head -20

# Check who instantiates SurrealDBConversationStore
echo -e "\n=== Who instantiates SurrealDBConversationStore ==="
rg -nP --type=py 'SurrealDBConversationStore\(' --glob '!**/tests/**' | head -20

# Check who uses AsyncMongoDBStateStore and how
echo -e "\n=== Who instantiates AsyncMongoDBStateStore ==="
rg -nP --type=py 'AsyncMongoDBStateStore\(' --glob '!**/tests/**' | head -20

# Check if async_mongodb sync methods are called in async contexts
echo -e "\n=== Usage patterns of async_mongodb sync methods (get, set, etc) ==="
rg -nP --type=py '\.(get|set|delete|exists)\(' src/praisonai/praisonai/persistence/state/async_mongodb.py | head -15

Repository: MervinPraison/PraisonAI

Length of output: 1158


🏁 Script executed:

#!/bin/bash
# Check factory.py to see who calls get_conversation_store and get_state_store
echo "=== Factory methods that create stores ==="
rg -nP --type=py 'def get_conversation_store|def get_state_store' src/praisonai/praisonai/persistence/factory.py -A8

# Check if any async method calls these factory functions
echo -e "\n=== Who calls factory methods ==="
rg -nP --type=py 'get_conversation_store|get_state_store' --glob '!**/tests/**' --glob '!factory.py' | head -30

# Double-check: is _run_autogen_v4 ever called (with different patterns)?
echo -e "\n=== Exhaustive search for _run_autogen_v4 calls ==="
rg -nP --type=py '\._run_autogen_v4|run_autogen_v4' src/praisonai/praisonai/agents_generator.py

# Check the full context of where _run_autogen_v4 is defined
echo -e "\n=== Full context of _run_autogen_v4 definition ==="
sed -n '714,850p' src/praisonai/praisonai/agents_generator.py | head -80

Repository: MervinPraison/PraisonAI

Length of output: 4183


🏁 Script executed:

#!/bin/bash
# Key finding: _run_autogen_v4 is never called; run_sync is called FROM _run_autogen_v4 (which is sync)
# This is different than the review claimed.
# Let's verify the actual problematic sites - check if they're used in async contexts

echo "=== Check if SurrealDB store is used in async context ==="
rg -nP --type=py 'async def.*conversation|await.*conversation|await.*store' src/praisonai/praisonai --glob '!**/tests/**' | grep -i surrealdb | head -10

echo -e "\n=== Check if AsyncMongoDB store is actually used (not just in factory) ==="
rg -nP --type=py 'async def.*state|await.*state' src/praisonai/praisonai --glob '!**/tests/**' | head -20

# Let's check: who CALLS factory.get_conversation_store and get_state_store
echo -e "\n=== Direct search for factory method calls ==="
rg -nP --type=py 'from.*factory|import.*factory' src/praisonai/praisonai --glob '!**/tests/**' -l

# Check a specific high-level caller like Agent that might use these
echo -e "\n=== Check if Agent or similar classes use persistence ==="
rg -nP --type=py 'conversation.*store|state.*store' src/praisonai/praisonai/agent.py 2>/dev/null | head -20

Repository: MervinPraison/PraisonAI

Length of output: 1472


🏁 Script executed:

#!/bin/bash
# Check persistence orchestrator to see how stores are used
echo "=== Persistence orchestrator usage ==="
rg -nP --type=py 'get_conversation_store|get_state_store' src/praisonai/praisonai/persistence/orchestrator.py -B3 -A3 | head -40

# Check if orchestrator is used in async contexts
echo -e "\n=== Is orchestrator used async? ==="
rg -nP --type=py 'async def|await.*orchestrator' src/praisonai/praisonai/persistence/orchestrator.py | head -20

# Check CLI persistence feature
echo -e "\n=== CLI persistence feature usage of factory ==="
sed -n '1,100p' src/praisonai/praisonai/cli/features/persistence.py

# Most importantly: verify the actual signature of __init__ in surrealdb.py
echo -e "\n=== SurrealDB __init__ signature and context ==="
sed -n '18,85p' src/praisonai/praisonai/persistence/conversation/surrealdb.py

Repository: MervinPraison/PraisonAI

Length of output: 5508


🏁 Script executed:

#!/bin/bash
# Final verification: Is there any async code that actually calls SurrealDBConversationStore or AsyncMongoDBStateStore?
echo "=== Async context that might instantiate stores ==="
rg -nP --type=py 'async def.*__init__|SurrealDBConversationStore\(|AsyncMongoDBStateStore\(' --glob '!**/tests/**' -B5 | head -50

# Check if the sync methods on AsyncMongoDB are ever awaited (which would be an error)
echo -e "\n=== Check async calls to async_mongodb sync methods ==="
rg -nP --type=py 'await.*\.get\(|await.*\.set\(|await.*\.delete\(' src/praisonai/praisonai --glob '!**/tests/**'

# Verify: what type of code instantiates SurrealDB? Is it async-safe?
echo -e "\n=== Who actually instantiates SurrealDBConversationStore ==="
rg -nP --type=py -B10 -A3 'SurrealDBConversationStore(' src/praisonai/praisonai/persistence/conversation/surrealdb.py

# Check the broader pattern: are there async factory methods or only sync?
echo -e "\n=== Factory method signatures ==="
rg -nP --type=py 'def get_conversation_store|def get_state_store|async def' src/praisonai/praisonai/persistence/factory.py | head -20

Repository: MervinPraison/PraisonAI

Length of output: 7434


🏁 Script executed:

#!/bin/bash
# Check jobs/router.py to see what kind of store it's using
echo "=== Jobs router store setup ==="
rg -nP --type=py 'store\s*=' src/praisonai/praisonai/jobs/router.py -B5 | head -30

# Check if store in jobs/router is AsyncMongoDBStateStore or another type
echo -e "\n=== Store initialization in jobs ==="
sed -n '1,100p' src/praisonai/praisonai/jobs/router.py | grep -A10 -B10 'def\|store'

# Key check: are the sync wrapper methods on async_mongodb actually designed to be awaited?
echo -e "\n=== AsyncMongoDB sync method signature ==="
sed -n '97,105p' src/praisonai/praisonai/persistence/state/async_mongodb.py

# Check if store is ever used in a way that expects async
echo -e "\n=== How is the store passed to router? ==="
rg -nP --type=py 'router.*store|store.*router' src/praisonai/praisonai/jobs --glob '!**/tests/**' -B3 -A3 | head -40

Repository: MervinPraison/PraisonAI

Length of output: 3551


🏁 Script executed:

#!/bin/bash
# Critical: check what JobStore is and what get_store() returns
echo "=== JobStore base class and get_store implementation ==="
rg -nP --type=py 'class JobStore|def get_store' src/praisonai/praisonai/jobs --glob '!**/tests/**' -A10 | head -50

# Check the store.py file
echo -e "\n=== jobs/store.py content ==="
head -100 src/praisonai/praisonai/jobs/store.py

# The key insight: check if sync wrappers are ACTUALLY being called from async code
# Let's look at jobs/router.py more carefully
echo -e "\n=== Full view of how store methods are used in router ==="
sed -n '45,75p' src/praisonai/praisonai/jobs/router.py

Repository: MervinPraison/PraisonAI

Length of output: 5947


🏁 Script executed:

#!/bin/bash
# Key clarification: is AsyncMongoDBStateStore actually USED anywhere, or just defined?
echo "=== Real usage of AsyncMongoDBStateStore (not just definition/import) ==="
rg -nP --type=py 'AsyncMongoDBStateStore' src/praisonai/praisonai --glob '!**/tests/**' --glob '!factory.py' --glob '!async_mongodb.py'

# Check if persistence factory methods are called from async contexts
echo -e "\n=== Who calls get_conversation_store or get_state_store ==="
rg -nP --type=py 'get_conversation_store|get_state_store' src/praisonai/praisonai --glob '!**/tests/**' --glob '!factory.py' | head -30

# Most importantly: Is _run_autogen_v4 ever called in the actual code?
echo -e "\n=== Exhaustive check: any call to _run_autogen_v4 method ==="
rg -nP --type=py 'self\._run_autogen_v4|gen\._run_autogen_v4' src/praisonai/praisonai --glob '!**/tests/**'

# The agents_generator line 817 - what calls the _run_autogen_v4 method that contains this?
echo -e "\n=== Who calls _run_autogen_v4 method that has run_sync on line 817? ==="
rg -nP --type=py '\._run_autogen_v4\(' src/praisonai/praisonai --glob '!**/tests/**' -B5 -A5

Repository: MervinPraison/PraisonAI

Length of output: 337


🏁 Script executed:

#!/bin/bash
# Exit code 1 means no results found. Verify the code is indeed dead/unused
echo "=== Verify: is _run_autogen_v4 truly dead code? ==="
rg -nP --type=py 'run_autogen_v4' src/praisonai/praisonai/agents_generator.py

# Check for any reference to these stores in the entire codebase (excluding factory, class definition, imports)
echo -e "\n=== All mentions of SurrealDBConversationStore or AsyncMongoDBStateStore ==="
rg -nP --type=py 'SurrealDBConversationStore|AsyncMongoDBStateStore' src/praisonai/praisonai --glob '!**/tests/**'

# Check if factory.py methods are called anywhere
echo -e "\n=== Verify: factory.py is it even imported or used? ==="
rg -nP --type=py 'from.*factory|import.*factory|factory\.get_' src/praisonai/praisonai --glob '!**/tests/**'

# So what DOES call run_sync() in reality?
echo -e "\n=== Actual run_sync callers (non-test) ==="
rg -nP --type=py '\brun_sync\(' src/praisonai/praisonai --glob '!**/tests/**' | cut -d: -f1 | sort -u

Repository: MervinPraison/PraisonAI

Length of output: 3682


🏁 Script executed:

#!/bin/bash
# Check persistence/__init__.py to see if stores can be created in async contexts
echo "=== persistence/__init__.py usage of factory ==="
sed -n '60,85p' src/praisonai/praisonai/persistence/__init__.py

# Check cli/features/persistence.py usage
echo -e "\n=== CLI persistence.py usage context ==="
rg -nP --type=py -B5 -A5 'create_conversation_store|create_state_store' src/praisonai/praisonai/cli/features/persistence.py | head -50

# The critical question: can these stores be used from async endpoints?
# Check if any async handler or endpoint could call persistence methods
echo -e "\n=== Check: can async code instantiate stores? ==="
rg -nP --type=py 'async def.*store|create_stores_from_config' src/praisonai/praisonai/persistence/orchestrator.py -B3 -A10 | head -40

# List all persistence store callers - are they sync or async?
echo -e "\n=== Orchestrator how is it used? ==="
rg -nP --type=py 'orchestrator|from.*orchestrator' src/praisonai/praisonai --glob '!**/tests/**' --glob '!orchestrator.py'

Repository: MervinPraison/PraisonAI

Length of output: 19805


🏁 Script executed:

#!/bin/bash
# Final critical check: Is PersistenceOrchestrator ever instantiated in an async context?
echo "=== Is PersistenceOrchestrator instantiated in async functions? ==="
rg -nP --type=py 'async def.*' src/praisonai/praisonai/persistence/hooks/agent_hooks.py -A20 | grep -A15 'PersistenceOrchestrator'

# Check if wrap_agent_with_persistence or PersistentAgent are called from async
echo -e "\n=== Who calls wrap_agent_with_persistence or PersistentAgent? ==="
rg -nP --type=py 'wrap_agent_with_persistence|PersistentAgent' src/praisonai/praisonai --glob '!**/tests/**' --glob '!agent_hooks.py' | head -20

# Check if any async code creates stores directly
echo -e "\n=== Async code that might create stores directly ==="
rg -nP --type=py 'async def.*' src/praisonai/praisonai -A10 | grep -B5 'create_conversation_store\|create_state_store\|SurrealDBConversationStore\|AsyncMongoDBStateStore'

Repository: MervinPraison/PraisonAI

Length of output: 1064


Run_sync() hardening breaks sync wrappers in async contexts—but scope is narrower than stated.

The new check unconditionally raises RuntimeError when called from a running event loop. However, your three examples have different risk profiles:

  1. agents_generator.py:817_run_autogen_v4() is dead code; it's never called anywhere in the codebase. No action needed.

  2. surrealdb.py:78 — Only instantiated via the sync factory (factory.py, line 63), which is CLI-driven. No evidence of async instantiation in the codebase today. Low risk, but the learnings require sync/async conversions to respect the caller's execution context; init blocking the event loop would be unsafe if instantiation ever occurs async.

  3. async_mongodb.py — More serious: sync wrapper methods (get, set, delete, etc.) unconditionally call run_sync(), violating the learnings principle that "all operations must fire in the current execution context." If these sync methods are ever called from async code (which the method names invite), they will now fail. The async variants (async_get, async_set, etc.) exist and should be used instead in async contexts.

Recommended fix: For async_mongodb, either rename sync wrappers to discourage async use (e.g., get_sync()), add runtime guards like a2u_server.publish_sync does, or remove them and require explicit await calls. For surrealdb, add a guard only if async instantiation becomes possible.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/praisonai/praisonai/_async_bridge.py` around lines 95 - 109, The new
run_sync() check breaks the sync wrappers in async_mongodb because methods like
get, set, delete unconditionally call run_sync() even when invoked from a
running event loop; update async_mongodb so each sync wrapper (get, set, delete,
etc.) detects an active event loop (via asyncio.get_running_loop()) and either
(a) raise a clear RuntimeError directing callers to use the async counterparts
(async_get, async_set, async_delete) or (b) rename the sync wrappers (e.g.,
get_sync, set_sync, delete_sync) to discourage async use — pick one strategy and
apply it consistently; for surrealdb, only add a similar guard inside __init__
or its sync factory path if/when async instantiation becomes supported to avoid
blocking an event loop.



Expand Down
24 changes: 6 additions & 18 deletions src/praisonai/praisonai/agents_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,8 @@
import keyword

# Import new architecture components
from .framework_adapters import (
FrameworkAdapter, CrewAIAdapter, AutoGenAdapter,
AutoGenV4Adapter, AG2Adapter, PraisonAIAdapter
)
from .framework_adapters.base import FrameworkAdapter
from .framework_adapters.registry import FrameworkAdapterRegistry
from .tool_registry import ToolRegistry

# Import availability flags
Expand Down Expand Up @@ -51,14 +49,8 @@
except ImportError:
pass

# Registry of available adapters (lazy-loaded)
FRAMEWORK_ADAPTERS = {
"crewai": CrewAIAdapter,
"autogen": AutoGenAdapter,
"autogen_v4": AutoGenV4Adapter,
"ag2": AG2Adapter,
"praisonai": PraisonAIAdapter
}
# Framework adapter registry - now uses proper registry pattern
# This replaces the hardcoded FRAMEWORK_ADAPTERS dict

# Note: OTEL_SDK_DISABLED moved to CLI entry point per issue requirements

Expand Down Expand Up @@ -258,12 +250,8 @@ def _get_framework_adapter(self, framework: str) -> FrameworkAdapter:
Raises:
ValueError: If framework is not supported
"""
if framework not in FRAMEWORK_ADAPTERS:
raise ValueError(f"Unsupported framework: {framework}. "
f"Supported frameworks: {list(FRAMEWORK_ADAPTERS.keys())}")

adapter_class = FRAMEWORK_ADAPTERS[framework]
return adapter_class()
adapter_registry = FrameworkAdapterRegistry.get_instance()
return adapter_registry.create(framework)

def _merge_cli_config(self, config, cli_config):
"""
Expand Down
Loading