diff --git a/examples/python/managed-agents/provider/all_providers.py b/examples/python/managed-agents/provider/all_providers.py
index b3b94e600..733efa6bf 100644
--- a/examples/python/managed-agents/provider/all_providers.py
+++ b/examples/python/managed-agents/provider/all_providers.py
@@ -1,7 +1,7 @@
 """All compute providers — comprehensive test across Local, Docker, E2B, and Modal.
 
-This example mirrors the Anthropic app.py but uses the local provider with
-various compute backends instead of Anthropic's managed infrastructure.
+This example demonstrates local agent loops with various compute backends for tool sandboxing.
+Uses the new LocalAgent class to clearly indicate local execution with optional cloud compute.
 
 Requires:
 - Docker running locally
@@ -9,7 +9,8 @@
 - modal CLI configured (modal token set)
 """
 import asyncio
-from praisonai import Agent, ManagedAgent, LocalManagedConfig
+from praisonai import Agent
+from praisonai.integrations import LocalAgent, LocalAgentConfig
 
 
 async def test_provider(name, compute, extra_provision_kwargs=None):
@@ -18,10 +19,9 @@
     print(f"  PROVIDER: {name}")
     print(f"{'='*60}")
 
-    managed = ManagedAgent(
-        provider="local",
+    managed = LocalAgent(
         compute=compute,
-        config=LocalManagedConfig(
+        config=LocalAgentConfig(
             model="gpt-4o-mini",
             system="You are a helpful assistant. Be concise.",
             name=f"{name}Agent",
diff --git a/examples/python/managed-agents/provider/local_basic.py b/examples/python/managed-agents/provider/local_basic.py
index be26f8721..3c240761a 100644
--- a/examples/python/managed-agents/provider/local_basic.py
+++ b/examples/python/managed-agents/provider/local_basic.py
@@ -1,13 +1,14 @@
-"""Local provider — basic managed agent with gpt-4o-mini.
+"""Local agent — basic local execution with gpt-4o-mini.
 
 No external infrastructure needed. Runs the agent loop locally.
+Uses the new canonical LocalAgent class for clarity.
 """
-from praisonai import Agent, ManagedAgent, LocalManagedConfig
+from praisonai import Agent
+from praisonai.integrations import LocalAgent, LocalAgentConfig
 
-# Create a local managed agent (auto-detects local when no ANTHROPIC_API_KEY)
-managed = ManagedAgent(
-    provider="local",
-    config=LocalManagedConfig(
+# Create a local agent (runs locally, no managed runtime)
+managed = LocalAgent(
+    config=LocalAgentConfig(
         model="gpt-4o-mini",
         system="You are a helpful assistant. Be concise.",
         name="LocalAgent",
diff --git a/examples/python/managed-agents/provider/runtime_hosted_anthropic.py b/examples/python/managed-agents/provider/runtime_hosted_anthropic.py
new file mode 100644
index 000000000..97a8feca8
--- /dev/null
+++ b/examples/python/managed-agents/provider/runtime_hosted_anthropic.py
@@ -0,0 +1,48 @@
+"""Anthropic hosted runtime — entire agent runs on Anthropic's managed infrastructure.
+
+Uses the new canonical HostedAgent class which clearly communicates that the entire
+agent loop runs in Anthropic's cloud, not locally.
+"""
+from praisonai import Agent
+from praisonai.integrations import HostedAgent, HostedAgentConfig
+
+# Create a hosted agent running entirely on Anthropic's managed runtime
+hosted = HostedAgent(
+    provider="anthropic",
+    config=HostedAgentConfig(
+        model="claude-3-5-sonnet-latest",
+        system="You are a helpful coding assistant. Be concise.",
+        name="AnthropicHostedAgent",
+        tools=[{"type": "agent_toolset_20260401"}],
+    ),
+)
+
+agent = Agent(name="anthropic-hosted", backend=hosted)
+
+# 1. Basic execution - runs entirely in Anthropic's cloud
+print("[1] Hosted execution on Anthropic infrastructure...")
+result = agent.start("What is the capital of France? One word.", stream=True)
+print(f"    Result: {result}")
+
+# 2. Agent metadata from Anthropic's API
+print(f"\n[2] Agent ID: {hosted.agent_id}")
+print(f"    Version: {hosted.agent_version}")
+print(f"    Env ID: {hosted.environment_id}")
+print(f"    Session: {hosted.session_id}")
+
+# 3. Multi-turn (same session keeps context in Anthropic's cloud)
+print("\n[3] Multi-turn conversation...")
+result = agent.start("What country is that city in?", stream=True)
+print(f"    Result: {result}")
+
+# 4. Usage tracking from Anthropic's usage API
+info = hosted.retrieve_session()
+print(f"\n[4] Usage: in={info['usage']['input_tokens']}, out={info['usage']['output_tokens']}")
+
+# 5. List all sessions for this agent
+sessions = hosted.list_sessions()
+print(f"\n[5] Sessions: {len(sessions)}")
+for s in sessions[:3]:  # Show first 3
+    print(f"    {s['id']} | {s['status']}")
+
+print("\nDone! Agent loop ran entirely on Anthropic's managed infrastructure.")
\ No newline at end of file
diff --git a/examples/python/managed-agents/provider/runtime_local_gemini.py b/examples/python/managed-agents/provider/runtime_local_gemini.py
new file mode 100644
index 000000000..e4f303bf7
--- /dev/null
+++ b/examples/python/managed-agents/provider/runtime_local_gemini.py
@@ -0,0 +1,46 @@
+"""Local agent loop with Gemini LLM — runs locally, not in a managed runtime.
+
+Uses the new canonical LocalAgent class which clearly communicates that only the
+agent loop runs locally. The LLM calls go to Google's Gemini API, but there's no managed runtime involved.
+"""
+from praisonai import Agent
+from praisonai.integrations import LocalAgent, LocalAgentConfig
+
+# Create a local agent using Gemini LLM
+local = LocalAgent(
+    config=LocalAgentConfig(
+        model="gemini/gemini-2.0-flash",  # Use Gemini with litellm routing prefix
+        system="You are a helpful coding assistant. Be concise.",
+        name="LocalGeminiAgent",
+        tools=["execute_command", "read_file", "write_file"],
+    ),
+)
+
+agent = Agent(name="local-gemini", backend=local)
+
+# 1. Basic execution - agent loop runs locally, LLM calls go to Gemini
+print("[1] Local execution with Gemini LLM...")
+result = agent.start("What is the capital of France? One word.", stream=True)
+print(f"    Result: {result}")
+
+# 2. Agent metadata (locally generated UUIDs)
+print(f"\n[2] Agent ID: {local.agent_id}")
+print(f"    Version: {local.agent_version}")
+print(f"    Env ID: {local.environment_id}")
+print(f"    Session: {local.session_id}")
+
+# 3. Multi-turn conversation (session state maintained locally)
+print("\n[3] Multi-turn conversation...")
+result = agent.start("What country is that city in?", stream=True)
+print(f"    Result: {result}")
+
+# 4. Usage tracking (accumulated locally)
+info = local.retrieve_session()
+print(f"\n[4] Usage: in={info['usage']['input_tokens']}, out={info['usage']['output_tokens']}")
+
+# 5. Tool execution (runs in local subprocess)
+print("\n[5] Tool execution example...")
+result = agent.start("Create a file called hello_gemini.txt with 'Hello from Gemini agent!'", stream=True)
+print(f"    Tool result: {result}")
+
+print("\nDone! Agent loop ran locally with Gemini LLM calls.")
\ No newline at end of file
diff --git a/examples/python/managed-agents/provider/runtime_local_ollama.py b/examples/python/managed-agents/provider/runtime_local_ollama.py
new file mode 100644
index 000000000..a2945581f
--- /dev/null
+++ b/examples/python/managed-agents/provider/runtime_local_ollama.py
@@ -0,0 +1,46 @@
+"""Local agent loop with Ollama LLM — runs locally, not in a managed runtime.
+
+Uses the new canonical LocalAgent class which clearly communicates that only the
+agent loop runs locally. The LLM calls go to a local Ollama instance, no managed runtime involved.
+"""
+from praisonai import Agent
+from praisonai.integrations import LocalAgent, LocalAgentConfig
+
+# Create a local agent using Ollama LLM
+local = LocalAgent(
+    config=LocalAgentConfig(
+        model="ollama/llama3.2",  # Use Ollama with litellm routing prefix
+        system="You are a helpful coding assistant. Be concise.",
+        name="LocalOllamaAgent",
+        tools=["execute_command", "read_file", "write_file"],
+    ),
+)
+
+agent = Agent(name="local-ollama", backend=local)
+
+# 1. Basic execution - agent loop runs locally, LLM calls go to local Ollama
+print("[1] Local execution with Ollama LLM...")
+result = agent.start("What is the capital of France? One word.", stream=True)
+print(f"    Result: {result}")
+
+# 2. Agent metadata (locally generated UUIDs)
+print(f"\n[2] Agent ID: {local.agent_id}")
+print(f"    Version: {local.agent_version}")
+print(f"    Env ID: {local.environment_id}")
+print(f"    Session: {local.session_id}")
+
+# 3. Multi-turn conversation (session state maintained locally)
+print("\n[3] Multi-turn conversation...")
+result = agent.start("What country is that city in?", stream=True)
+print(f"    Result: {result}")
+
+# 4. Usage tracking (accumulated locally)
+info = local.retrieve_session()
+print(f"\n[4] Usage: in={info['usage']['input_tokens']}, out={info['usage']['output_tokens']}")
+
+# 5. Tool execution (runs in local subprocess)
+print("\n[5] Tool execution example...")
+result = agent.start("Create a file called hello_ollama.txt with 'Hello from Ollama agent!'", stream=True)
+print(f"    Tool result: {result}")
+
+print("\nDone! Agent loop ran locally with Ollama LLM calls.")
\ No newline at end of file
diff --git a/examples/python/managed-agents/provider/runtime_local_openai.py b/examples/python/managed-agents/provider/runtime_local_openai.py
new file mode 100644
index 000000000..a68bff4ba
--- /dev/null
+++ b/examples/python/managed-agents/provider/runtime_local_openai.py
@@ -0,0 +1,46 @@
+"""Local agent loop with OpenAI LLM — runs locally, not in a managed runtime.
+
+Uses the new canonical LocalAgent class which clearly communicates that only the
+agent loop runs locally. The LLM calls go to OpenAI, but there's no managed runtime involved.
+"""
+from praisonai import Agent
+from praisonai.integrations import LocalAgent, LocalAgentConfig
+
+# Create a local agent using OpenAI's LLM
+local = LocalAgent(
+    config=LocalAgentConfig(
+        model="gpt-4o-mini",
+        system="You are a helpful coding assistant. Be concise.",
+        name="LocalOpenAIAgent",
+        tools=["execute_command", "read_file", "write_file"],
+    ),
+)
+
+agent = Agent(name="local-openai", backend=local)
+
+# 1. Basic execution - agent loop runs locally, LLM calls go to OpenAI
+print("[1] Local execution with OpenAI LLM...")
+result = agent.start("What is the capital of France? One word.", stream=True)
+print(f"    Result: {result}")
+
+# 2. Agent metadata (locally generated UUIDs)
+print(f"\n[2] Agent ID: {local.agent_id}")
+print(f"    Version: {local.agent_version}")
+print(f"    Env ID: {local.environment_id}")
+print(f"    Session: {local.session_id}")
+
+# 3. Multi-turn conversation (session state maintained locally)
+print("\n[3] Multi-turn conversation...")
+result = agent.start("What country is that city in?", stream=True)
+print(f"    Result: {result}")
+
+# 4. Usage tracking (accumulated locally)
+info = local.retrieve_session()
+print(f"\n[4] Usage: in={info['usage']['input_tokens']}, out={info['usage']['output_tokens']}")
+
+# 5. Tool execution (runs in local subprocess)
+print("\n[5] Tool execution example...")
+result = agent.start("Create a file called hello.txt with 'Hello from local agent!'", stream=True)
+print(f"    Tool result: {result}")
+
+print("\nDone! Agent loop ran locally with OpenAI LLM calls.")
\ No newline at end of file
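Aside: the three local-runtime examples above differ only in their model= string, because LocalAgent routes models through litellm-style identifiers ("provider/model" prefixes such as "gemini/..." and "ollama/...", with bare names like "gpt-4o-mini" defaulting to OpenAI). A minimal sketch of that pattern using only the API shown in this diff — the make_local_agent helper is illustrative, not part of the change::

    from praisonai import Agent
    from praisonai.integrations import LocalAgent, LocalAgentConfig

    # Hypothetical helper: the same local agent loop for any litellm-routed model.
    def make_local_agent(model: str) -> Agent:
        backend = LocalAgent(
            config=LocalAgentConfig(
                model=model,  # "gpt-4o-mini", "gemini/gemini-2.0-flash", "ollama/llama3.2", ...
                system="You are a helpful coding assistant. Be concise.",
                tools=["execute_command", "read_file", "write_file"],
            ),
        )
        return Agent(name=f"local-{model.replace('/', '-')}", backend=backend)

    for model in ("gpt-4o-mini", "gemini/gemini-2.0-flash", "ollama/llama3.2"):
        agent = make_local_agent(model)
        print(agent.start("What is the capital of France? One word."))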
diff --git a/src/praisonai/praisonai/__init__.py b/src/praisonai/praisonai/__init__.py
index bd9c1b0dd..8f20dab6a 100644
--- a/src/praisonai/praisonai/__init__.py
+++ b/src/praisonai/praisonai/__init__.py
@@ -27,6 +27,11 @@
     'LocalManagedConfig',  # backward compat alias
     'SandboxedAgent',  # new honest name
     'SandboxedAgentConfig',  # new honest name
+    # New canonical agent backends
+    'HostedAgent',
+    'HostedAgentConfig',
+    'LocalAgent',
+    'LocalAgentConfig',
 ]
 
 # Telemetry initialization state
@@ -123,6 +128,19 @@ def __getattr__(name):
     elif name in ('ManagedConfig', 'ManagedBackendConfig'):
         from .integrations.managed_agents import ManagedConfig
         return ManagedConfig
+    # New canonical agent backends
+    elif name == 'HostedAgent':
+        from .integrations.hosted_agent import HostedAgent
+        return HostedAgent
+    elif name == 'HostedAgentConfig':
+        from .integrations.hosted_agent import HostedAgentConfig
+        return HostedAgentConfig
+    elif name == 'LocalAgent':
+        from .integrations.local_agent import LocalAgent
+        return LocalAgent
+    elif name == 'LocalAgentConfig':
+        from .integrations.local_agent import LocalAgentConfig
+        return LocalAgentConfig
     elif name in ('DB', 'PraisonAIDB', 'PraisonDB'):
         from .db.adapter import DB
         return DB
diff --git a/src/praisonai/praisonai/integrations/__init__.py b/src/praisonai/praisonai/integrations/__init__.py
index ae2157867..59458eedf 100644
--- a/src/praisonai/praisonai/integrations/__init__.py
+++ b/src/praisonai/praisonai/integrations/__init__.py
@@ -43,6 +43,11 @@
     'SandboxedAgentConfig',  # new honest name
     'ManagedAgentIntegration',  # backward compat alias
     'ManagedBackendConfig',  # backward compat alias
+    # New canonical agent backends
+    'HostedAgent',
+    'HostedAgentConfig',
+    'LocalAgent',
+    'LocalAgentConfig',
     'get_available_integrations',
     'ExternalAgentRegistry',
     'get_registry',
@@ -107,4 +112,17 @@ def __getattr__(name):
     elif name == 'create_integration':
         from .registry import create_integration
         return create_integration
+    # New canonical agent backends
+    elif name == 'HostedAgent':
+        from .hosted_agent import HostedAgent
+        return HostedAgent
+    elif name == 'HostedAgentConfig':
+        from .hosted_agent import HostedAgentConfig
+        return HostedAgentConfig
+    elif name == 'LocalAgent':
+        from .local_agent import LocalAgent
+        return LocalAgent
+    elif name == 'LocalAgentConfig':
+        from .local_agent import LocalAgentConfig
+        return LocalAgentConfig
     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
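Both __init__.py hunks extend the module-level __getattr__ hook (PEP 562), so importing praisonai stays cheap: the new backend modules are imported only on first attribute access. A self-contained sketch of the same lazy-export pattern, with illustrative module names rather than the real files::

    # mypackage/__init__.py — minimal PEP 562 lazy-export sketch
    __all__ = ['HostedAgent', 'LocalAgent']

    def __getattr__(name):
        # Called only when normal module attribute lookup fails,
        # so each submodule import is deferred until first use.
        if name == 'HostedAgent':
            from .hosted_agent import HostedAgent
            return HostedAgent
        elif name == 'LocalAgent':
            from .local_agent import LocalAgent
            return LocalAgent
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")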
diff --git a/src/praisonai/praisonai/integrations/hosted_agent.py b/src/praisonai/praisonai/integrations/hosted_agent.py
new file mode 100644
index 000000000..9af1d3503
--- /dev/null
+++ b/src/praisonai/praisonai/integrations/hosted_agent.py
@@ -0,0 +1,93 @@
+"""
+Hosted Agent — canonical name for cloud-based agent runtime backends.
+
+This is the new canonical implementation that replaces the overloaded
+`ManagedAgent(provider="anthropic")` pattern. Currently aliases AnthropicManagedAgent
+but provides a clear semantic distinction: the entire agent loop runs on a remote
+managed runtime (Anthropic's cloud infrastructure).
+
+Implements ``ManagedBackendProtocol`` from the Core SDK.
+
+Usage::
+
+    from praisonai.integrations import HostedAgent, HostedAgentConfig
+    from praisonaiagents import Agent
+
+    # Hosted loop — entire agent runs on Anthropic's managed runtime
+    agent = Agent(name="a", backend=HostedAgent(
+        provider="anthropic",
+        config=HostedAgentConfig(
+            model="claude-3-5-sonnet-latest",
+            system="You are a concise assistant.",
+        ),
+    ))
+
+Architecture:
+    - Runtime provider axis: anthropic (only one supported today); e2b, modal, flyio (future)
+    - Agent loop runs entirely in the cloud provider's managed runtime
+    - Tools are co-located with the provider infrastructure
+"""
+
+from typing import Optional, Any
+from .managed_agents import AnthropicManagedAgent, ManagedConfig
+
+
+# Use the existing ManagedConfig as HostedAgentConfig for now
+# This preserves all current functionality while providing the new semantic naming
+HostedAgentConfig = ManagedConfig
+
+
+class HostedAgent(AnthropicManagedAgent):
+    """Canonical hosted agent backend for cloud-based managed runtimes.
+
+    Currently supports only Anthropic's managed runtime, but designed to extend
+    cleanly to other providers (E2B-Managed, Modal-Managed, etc.) in the future.
+
+    Key semantic distinction: the **entire agent loop** runs on the provider's
+    cloud infrastructure, including tools, context, and execution environment.
+
+    Args:
+        provider: Runtime provider name. Currently only "anthropic" is supported.
+            Future: "e2b", "modal", "flyio" when those runtimes are available.
+        config: HostedAgentConfig with model, system prompt, tools, etc.
+        **kwargs: Additional arguments passed to the underlying provider implementation.
+
+    Raises:
+        ValueError: If the specified provider is not available as a managed runtime.
+    """
+
+    def __init__(
+        self,
+        provider: str = "anthropic",
+        config: Optional[Any] = None,
+        **kwargs,
+    ):
+        if provider != "anthropic":
+            # Provide differentiated guidance based on provider type
+            _llm_hints = {"openai", "gemini", "ollama", "local"}
+            _compute_hints = {"e2b", "modal", "flyio", "daytona", "docker"}
+
+            if provider in _llm_hints:
+                hint = (
+                    "For local agent loops with this LLM, use: "
+                    "LocalAgent(config=LocalAgentConfig(model='...')) "
+                    "(e.g. 'gpt-4o-mini', 'gemini/gemini-2.0-flash', 'ollama/llama3')."
+                )
+            elif provider in _compute_hints:
+                hint = (
+                    f"For local execution with cloud compute, use: "
+                    f"LocalAgent(compute='{provider}', config=LocalAgentConfig(...))"
+                )
+            else:
+                hint = (
+                    "Use LocalAgent(config=LocalAgentConfig(model='...')) for local loops, "
+                    "or LocalAgent(compute='e2b'|'modal'|'docker'|...) for cloud-sandboxed tools."
+                )
+
+            raise ValueError(
+                f"Managed runtime for provider '{provider}' is not yet available. "
+                f"Currently supported: 'anthropic'. {hint}"
+            )
+
+        # Pass through to the existing Anthropic implementation
+        super().__init__(provider=provider, config=config, **kwargs)
\ No newline at end of file
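Given the constructor above, a provider name that is not a hosted runtime fails fast with a hint pointing at LocalAgent. A small hedged sketch of that behavior — it assumes the validation branch is reachable without any credentials configured::

    from praisonai.integrations import HostedAgent

    try:
        HostedAgent(provider="modal")  # compute provider, not a hosted runtime
    except ValueError as err:
        # Expected hint: "...use: LocalAgent(compute='modal', config=LocalAgentConfig(...))"
        print(err)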
diff --git a/src/praisonai/praisonai/integrations/local_agent.py b/src/praisonai/praisonai/integrations/local_agent.py
new file mode 100644
index 000000000..55d421e57
--- /dev/null
+++ b/src/praisonai/praisonai/integrations/local_agent.py
@@ -0,0 +1,86 @@
+"""
+Local Agent — canonical name for a local agent loop with optional cloud compute.
+
+This is the new canonical implementation that replaces the overloaded
+`ManagedAgent(provider="openai"/"gemini"/...)` pattern. The agent loop runs
+locally but can optionally use cloud compute providers for tool sandboxing.
+
+Implements ``ManagedBackendProtocol`` from the Core SDK.
+
+Usage::
+
+    from praisonai.integrations import LocalAgent, LocalAgentConfig
+    from praisonaiagents import Agent
+
+    # Local loop, tools optionally sandboxed in cloud compute
+    agent = Agent(name="b", backend=LocalAgent(
+        compute="e2b",  # or "modal", "flyio", "daytona", "docker", None
+        config=LocalAgentConfig(
+            model="gpt-4o-mini",  # LLM choice here — not provider=
+            system="You are a concise assistant.",
+        ),
+    ))
+
+    # Smallest footprint: local loop + local subprocess
+    agent = Agent(name="c", backend=LocalAgent(
+        config=LocalAgentConfig(model="gpt-4o-mini"),
+    ))
+
+Architecture:
+    - Agent loop runs locally in the current process
+    - LLM selection via model= (supports litellm routing like "gemini/...", "ollama/...")
+    - Optional compute= for cloud tool sandboxing (E2B, Modal, Docker, etc.)
+    - No provider= overload — clean separation of concerns
+"""
+
+import warnings
+from typing import Optional, Any
+from .managed_local import LocalManagedAgent, LocalManagedConfig
+
+
+# Use the existing LocalManagedConfig as LocalAgentConfig for now
+# This preserves all current functionality while providing the new semantic naming
+LocalAgentConfig = LocalManagedConfig
+
+
+class LocalAgent(LocalManagedAgent):
+    """Canonical local agent backend with optional cloud compute.
+
+    Key semantic distinction: the **agent loop runs locally** in your process.
+    Only tools can optionally be executed in a cloud compute environment for sandboxing.
+
+    Args:
+        compute: Optional compute provider for tool sandboxing.
+            Can be "e2b", "modal", "flyio", "daytona", "docker", or None (local subprocess).
+        config: LocalAgentConfig with model, system prompt, tools, etc.
+        **kwargs: Additional arguments passed to the underlying local implementation.
+
+    Note:
+        The legacy provider= parameter is deprecated on LocalAgent. It is still
+        accepted for backward-compatible LLM routing but emits a DeprecationWarning;
+        use config.model= for LLM selection instead (e.g., "gpt-4o",
+        "gemini/gemini-2.0-flash", "ollama/llama3").
+    """
+
+    def __init__(
+        self,
+        compute: Optional[Any] = None,
+        config: Optional[Any] = None,
+        **kwargs,
+    ):
+        # Deprecate the provider= overload pattern to steer callers toward clean usage
+        provider_for_routing = "local"  # Default provider for model routing
+        if 'provider' in kwargs:
+            provider_value = kwargs.pop('provider')
+            warnings.warn(
+                f"LocalAgent(provider='{provider_value}') is deprecated. "
+                "Use config.model= for LLM selection instead. "
+                "For example: LocalAgentConfig(model='gpt-4o-mini') or "
+                "LocalAgentConfig(model='gemini/gemini-2.0-flash')",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            # Preserve the provider value for LLM routing to maintain backward compatibility.
+            # This ensures _resolve_model() can still apply proper prefixes (ollama/, gemini/, etc.)
+            provider_for_routing = provider_value
+
+        # Pass compute= through, plus the provider value used only for LLM routing
+        super().__init__(compute=compute, config=config, provider=provider_for_routing, **kwargs)
\ No newline at end of file
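The shim above keeps legacy provider= calls working while steering callers to config.model=. A hedged sketch of both paths — it assumes the local backend constructs without network access, as the unit tests at the end of this diff do::

    import warnings
    from praisonai.integrations import LocalAgent, LocalAgentConfig

    # Canonical form: LLM choice lives in config.model=, compute is a separate axis
    LocalAgent(compute=None, config=LocalAgentConfig(model="gpt-4o-mini"))

    # Legacy form: provider= still routes (e.g. applying the ollama/ prefix) but warns
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LocalAgent(provider="ollama", config=LocalAgentConfig(model="llama3"))
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)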
diff --git a/src/praisonai/praisonai/integrations/managed_agents.py b/src/praisonai/praisonai/integrations/managed_agents.py
index a5de145be..594da57df 100644
--- a/src/praisonai/praisonai/integrations/managed_agents.py
+++ b/src/praisonai/praisonai/integrations/managed_agents.py
@@ -30,6 +30,7 @@
 import asyncio
 import logging
 import os
+import warnings
 from dataclasses import dataclass, field
 from typing import AsyncIterator, Callable, Dict, Any, Optional, List, Union
 from enum import Enum
@@ -1074,48 +1075,72 @@ def ManagedAgent(
     provider: Optional[str] = None,
     **kwargs,
 ):
-    """Factory that returns the appropriate managed agent backend.
-
-    Provider auto-detection:
-    - ``ANTHROPIC_API_KEY`` set → ``AnthropicManagedAgent``
-    - Otherwise → ``LocalManagedAgent``
-
-    Explicit providers:
-    - ``"anthropic"`` → ``AnthropicManagedAgent``
-    - ``"local"`` → ``LocalManagedAgent`` (any LLM via litellm)
-    - ``"openai"`` → ``LocalManagedAgent`` with OpenAI model
-    - ``"ollama"`` → ``LocalManagedAgent`` with Ollama prefix
-    - ``"gemini"`` → ``LocalManagedAgent`` with Gemini prefix
-
-    Examples::
-
-        # Auto-detect (Anthropic if key set, local otherwise)
-        managed = ManagedAgent()
-
-        # Explicit Anthropic
-        managed = ManagedAgent(provider="anthropic", config=ManagedConfig(...))
-
-        # Explicit local with OpenAI
-        managed = ManagedAgent(provider="openai", config=LocalManagedConfig(model="gpt-4o"))
-
-        # Ollama
-        managed = ManagedAgent(provider="ollama", config=LocalManagedConfig(model="llama3"))
-
+    """Deprecated factory. Use HostedAgent or LocalAgent explicitly.
+
+    DEPRECATION NOTICE: This factory conflates hosted-runtime selection with LLM routing.
+
+    New canonical usage:
+    - For hosted runtimes: HostedAgent(provider="anthropic", ...)
+    - For local loops: LocalAgent(config=LocalAgentConfig(model="gpt-4o-mini"), ...)
+
+    Legacy behavior (deprecated):
+    - provider="anthropic" → AnthropicManagedAgent(...)
+    - provider in {"openai","gemini","ollama","local"} → LocalManagedAgent(...)
+      (DeprecationWarning: use LocalAgent directly; put the LLM name in model=)
+    - provider in {"e2b","modal","flyio","daytona","docker"} → raise ValueError
+      (cloud compute belongs on LocalAgent(compute=...); hosted runtimes for
+      these providers are not yet available)
+
     Returns:
         An instance satisfying ``ManagedBackendProtocol``.
+
+    Raises:
+        ValueError: For compute-provider names, which should use LocalAgent(compute=...).
     """
-    if provider is None:
+    # Track whether provider was auto-detected, to avoid spurious deprecation warnings
+    auto_detected = provider is None
+    if auto_detected:
         # Auto-detect
         if os.getenv("ANTHROPIC_API_KEY") or os.getenv("CLAUDE_API_KEY"):
             provider = "anthropic"
         else:
             provider = "local"
 
+    # Hosted runtime provider
     if provider == "anthropic":
         return AnthropicManagedAgent(provider=provider, **kwargs)
-    else:
+
+    # Compute provider names — these belong on LocalAgent(compute=...), not here
+    elif provider in {"e2b", "modal", "flyio", "daytona", "docker"}:
+        raise ValueError(
+            f"ManagedAgent(provider='{provider}') is not supported: cloud compute "
+            f"belongs on LocalAgent(compute='{provider}', config=LocalAgentConfig(...)). "
+            "Hosted runtimes for these providers are not yet available."
+        )
+
+    # LLM routing hints (deprecated usage) — only warn when explicitly passed by the user
+    elif provider in {"openai", "gemini", "ollama", "local"}:
+        if not auto_detected:
+            warnings.warn(
+                f"ManagedAgent(provider='{provider}') is deprecated. "
+                "Use LocalAgent directly with model= instead: "
+                "LocalAgent(config=LocalAgentConfig(model='gpt-4o-mini'))",
+                DeprecationWarning,
+                stacklevel=2,
+            )
         from .managed_local import LocalManagedAgent
         return LocalManagedAgent(provider=provider, **kwargs)
+
+    # Unknown provider
+    else:
+        raise ValueError(
+            f"Unknown provider '{provider}'. "
+            "Supported: 'anthropic' for hosted runtime, "
+            "or use LocalAgent(config=LocalAgentConfig(model='your-model')) for local execution."
+        )
 
 
 # ── Backward-compatible aliases ──
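Put together, the factory's legacy-to-canonical mapping looks like this. A migration sketch, not a definitive recipe — constructing the hosted backend presumably requires Anthropic credentials, so the "before" calls are shown as comments::

    from praisonai.integrations import HostedAgent, HostedAgentConfig, LocalAgent, LocalAgentConfig

    # Hosted runtime: same semantics, clearer name
    # before: ManagedAgent(provider="anthropic")
    hosted = HostedAgent(provider="anthropic", config=HostedAgentConfig(model="claude-3-5-sonnet-latest"))

    # LLM routing: provider= moves into config.model=
    # before: ManagedAgent(provider="openai")   (now emits DeprecationWarning)
    local = LocalAgent(config=LocalAgentConfig(model="gpt-4o-mini"))

    # Compute sandboxing: invalid on the factory, explicit on LocalAgent
    # before: ManagedAgent(provider="e2b")      (now raises ValueError)
    sandboxed = LocalAgent(compute="e2b", config=LocalAgentConfig(model="gpt-4o-mini"))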
" + f"Use LocalAgent(compute='{provider}', config=LocalAgentConfig(...)) instead.", + DeprecationWarning, + stacklevel=2 + ) + from .managed_local import LocalManagedAgent + return LocalManagedAgent(provider="local", compute=provider, **kwargs) + + # LLM routing hints (deprecated usage) - only warn if explicitly passed by user + elif provider in {"openai", "gemini", "ollama", "local"}: + if not auto_detected: + warnings.warn( + f"ManagedAgent(provider='{provider}') is deprecated. " + f"Use LocalAgent directly with model= instead: " + f"LocalAgent(config=LocalAgentConfig(model='gpt-4o-mini'))", + DeprecationWarning, + stacklevel=2 + ) from .managed_local import LocalManagedAgent return LocalManagedAgent(provider=provider, **kwargs) + + # Unknown provider + else: + raise ValueError( + f"Unknown provider '{provider}'. " + f"Supported: 'anthropic' for hosted runtime, " + f"or use LocalAgent(config=LocalAgentConfig(model='your-model')) for local execution." + ) # ── Backward-compatible aliases ── diff --git a/tests/unit/integrations/test_backend_semantics.py b/tests/unit/integrations/test_backend_semantics.py new file mode 100644 index 000000000..66e670f42 --- /dev/null +++ b/tests/unit/integrations/test_backend_semantics.py @@ -0,0 +1,192 @@ +""" +Test semantic correctness of the new HostedAgent/LocalAgent split. + +Ensures that the provider= overload fix maintains all backward compatibility +while clearly distinguishing hosted runtime from local execution semantics. +""" +import pytest +import warnings +from unittest.mock import patch, MagicMock + +def test_hosted_agent_imports(): + """Test that new HostedAgent classes can be imported.""" + from praisonai.integrations import HostedAgent, HostedAgentConfig + assert HostedAgent is not None + assert HostedAgentConfig is not None + + # Also test top-level imports + from praisonai import HostedAgent as TopLevelHostedAgent + from praisonai import HostedAgentConfig as TopLevelHostedAgentConfig + assert TopLevelHostedAgent is not None + assert TopLevelHostedAgentConfig is not None + + +def test_local_agent_imports(): + """Test that new LocalAgent classes can be imported.""" + from praisonai.integrations import LocalAgent, LocalAgentConfig + assert LocalAgent is not None + assert LocalAgentConfig is not None + + # Also test top-level imports + from praisonai import LocalAgent as TopLevelLocalAgent + from praisonai import LocalAgentConfig as TopLevelLocalAgentConfig + assert TopLevelLocalAgent is not None + assert TopLevelLocalAgentConfig is not None + + +def test_hosted_agent_only_accepts_anthropic(): + """Test that HostedAgent only accepts 'anthropic' as provider.""" + from praisonai.integrations import HostedAgent, HostedAgentConfig + + # Should work + hosted = HostedAgent(provider="anthropic") + assert hosted.provider == "anthropic" + + # Should raise ValueError for non-existent managed runtimes + with pytest.raises(ValueError) as exc_info: + HostedAgent(provider="modal") + assert "not yet available" in str(exc_info.value) + assert "LocalAgent" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + HostedAgent(provider="e2b") + assert "not yet available" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + HostedAgent(provider="openai") + assert "not yet available" in str(exc_info.value) + + +def test_local_agent_rejects_provider_overload(): + """Test that LocalAgent rejects the provider= overload pattern.""" + from praisonai.integrations import LocalAgent, LocalAgentConfig + + # Should work without provider= + 
+
+
+def test_local_agent_deprecates_provider_overload():
+    """Test that LocalAgent warns on (rather than rejects) the provider= overload."""
+    from praisonai.integrations import LocalAgent, LocalAgentConfig
+
+    # Should work without provider=
+    local = LocalAgent(config=LocalAgentConfig(model="gpt-4o-mini"))
+    assert local is not None
+
+    # Should warn when provider= is used (deprecated pattern)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        LocalAgent(provider="openai", config=LocalAgentConfig(model="gpt-4o-mini"))
+        # Filter to only DeprecationWarnings mentioning provider= to avoid false positives
+        dep_warnings = [
+            rec for rec in w
+            if issubclass(rec.category, DeprecationWarning)
+            and "provider=" in str(rec.message)
+            and "config.model=" in str(rec.message)
+        ]
+        assert len(dep_warnings) == 1, f"Expected 1 provider= deprecation warning, got {len(dep_warnings)} from {len(w)} total warnings"
+
+
+def test_managed_agent_deprecation_warnings():
+    """Test that ManagedAgent emits proper deprecation warnings."""
+    from praisonai.integrations.managed_agents import ManagedAgent
+
+    # LLM routing providers should emit a deprecation warning
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+
+        with patch('praisonai.integrations.managed_local.LocalManagedAgent'):
+            ManagedAgent(provider="openai")
+        assert len(w) == 1
+        assert issubclass(w[0].category, DeprecationWarning)
+        assert "deprecated" in str(w[0].message).lower()
+        assert "LocalAgent" in str(w[0].message)
+
+
+def test_managed_agent_compute_provider_errors():
+    """Test that ManagedAgent raises proper errors for compute provider names."""
+    from praisonai.integrations.managed_agents import ManagedAgent
+
+    # Compute providers should raise ValueError
+    with pytest.raises(ValueError) as exc_info:
+        ManagedAgent(provider="modal")
+    assert "compute" in str(exc_info.value).lower()
+    assert "LocalAgent" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        ManagedAgent(provider="e2b")
+    assert "compute" in str(exc_info.value).lower()
+    assert "LocalAgent" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        ManagedAgent(provider="docker")
+    assert "compute" in str(exc_info.value).lower()
+    assert "LocalAgent" in str(exc_info.value)
+
+
+def test_managed_agent_anthropic_passthrough():
+    """Test that ManagedAgent(provider='anthropic') still works."""
+    from praisonai.integrations.managed_agents import ManagedAgent
+
+    with patch('praisonai.integrations.managed_agents.AnthropicManagedAgent') as mock_anthropic:
+        mock_instance = MagicMock()
+        mock_anthropic.return_value = mock_instance
+
+        result = ManagedAgent(provider="anthropic")
+
+        mock_anthropic.assert_called_once_with(provider="anthropic")
+        assert result == mock_instance
+
+
+def test_backward_compatibility_all_old_names():
+    """Test that all old import paths still work."""
+    # All of these should import without errors
+    from praisonai.integrations.managed_agents import (
+        ManagedAgent, ManagedConfig, AnthropicManagedAgent,
+        ManagedAgentIntegration, ManagedBackendConfig
+    )
+    from praisonai.integrations.managed_local import (
+        LocalManagedAgent, LocalManagedConfig
+    )
+    from praisonai.integrations.sandboxed_agent import (
+        SandboxedAgent, SandboxedAgentConfig
+    )
+
+    # Top-level imports
+    from praisonai import (
+        ManagedAgent as TopManagedAgent,
+        ManagedConfig as TopManagedConfig,
+        AnthropicManagedAgent as TopAnthropicManagedAgent,
+        LocalManagedAgent as TopLocalManagedAgent,
+        LocalManagedConfig as TopLocalManagedConfig,
+        SandboxedAgent as TopSandboxedAgent,
+        SandboxedAgentConfig as TopSandboxedAgentConfig,
+    )
+
+    # All should be defined
+    assert ManagedAgent is not None
+    assert ManagedConfig is not None
+    assert AnthropicManagedAgent is not None
+    assert ManagedAgentIntegration is not None
+    assert ManagedBackendConfig is not None
+    assert LocalManagedAgent is not None
+    assert LocalManagedConfig is not None
+    assert SandboxedAgent is not None
+    assert SandboxedAgentConfig is not None
+
+
+def test_config_aliases():
+    """Test that the config class aliases work correctly."""
+    from praisonai.integrations import (
+        HostedAgent, HostedAgentConfig, LocalAgent, LocalAgentConfig
+    )
+    from praisonai.integrations.managed_agents import ManagedConfig
+    from praisonai.integrations.managed_local import LocalManagedConfig
+
+    # HostedAgentConfig should alias ManagedConfig
+    assert HostedAgentConfig is ManagedConfig
+
+    # LocalAgentConfig should alias LocalManagedConfig
+    assert LocalAgentConfig is LocalManagedConfig
+
+
+def test_unknown_provider_error():
+    """Test that unknown providers raise helpful error messages."""
+    from praisonai.integrations.managed_agents import ManagedAgent
+
+    with pytest.raises(ValueError) as exc_info:
+        ManagedAgent(provider="unknown-provider")
+    assert "Unknown provider" in str(exc_info.value)
+    assert "anthropic" in str(exc_info.value)
+    assert "LocalAgent" in str(exc_info.value)
\ No newline at end of file
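To exercise the compatibility surface end to end, the new suite can be run on its own. A minimal sketch, assuming pytest is installed and praisonai is importable from the repository root::

    import pytest

    # Run just the backend-semantics tests added in this change
    raise SystemExit(pytest.main(["-q", "tests/unit/integrations/test_backend_semantics.py"]))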