diff --git a/src/praisonai/praisonai/cli/app.py b/src/praisonai/praisonai/cli/app.py index 2a32ff271..c5722c706 100644 --- a/src/praisonai/praisonai/cli/app.py +++ b/src/praisonai/praisonai/cli/app.py @@ -35,6 +35,48 @@ def _setup_langfuse_observability(*, verbose: bool = False) -> None: typer.echo(f"Warning: failed to initialize Langfuse observability: {e}", err=True) +def _setup_langextract_observability(*, verbose: bool = False) -> None: + """Set up Langextract observability by wiring TraceSink to action emitter.""" + try: + import importlib.util + + # Explicitly check if langextract is available before attempting to use it + if importlib.util.find_spec('langextract') is None: + if verbose: + typer.echo("Warning: langextract is not installed. Install with: pip install 'praisonai[langextract]'", err=True) + return + + from praisonai.observability.langextract import LangextractSink, LangextractSinkConfig + from praisonaiagents.trace.protocol import TraceEmitter, set_default_emitter + import os + import atexit + + # Build LangextractSinkConfig from env vars + config = LangextractSinkConfig( + output_path=os.getenv("PRAISONAI_LANGEXTRACT_OUTPUT", "praisonai-trace.html"), + auto_open=os.getenv("PRAISONAI_LANGEXTRACT_AUTO_OPEN", "false").lower() == "true", + ) + + # Create LangextractSink + sink = LangextractSink(config=config) + + # Ensure sink is closed on exit to write the trace file + atexit.register(sink.close) + + # Set up action-level trace emitter + emitter = TraceEmitter(sink=sink, enabled=True) + set_default_emitter(emitter) + + except ImportError: + # Gracefully degrade if langextract not installed + if verbose: + typer.echo("Warning: langextract is not installed. 
Install with: pip install 'praisonai[langextract]'", err=True) + except Exception as e: + # Avoid breaking CLI if observability setup fails + if verbose: + typer.echo(f"Warning: failed to initialize langextract observability: {e}", err=True) + + class OutputFormat(str, Enum): """Output format options.""" text = "text" @@ -125,7 +167,7 @@ def main_callback( None, "--observe", "-O", - help="Enable observability (langfuse, langsmith, etc.)", + help="Enable observability (langfuse, langextract)", envvar="PRAISONAI_OBSERVE", ), ): @@ -148,9 +190,15 @@ def main_callback( # Validate and set up observability if requested if observe: - if observe != "langfuse": - raise typer.BadParameter(f"Unsupported observe provider: {observe}") - _setup_langfuse_observability(verbose=verbose) + if observe == "langfuse": + _setup_langfuse_observability(verbose=verbose) + elif observe == "langextract": + _setup_langextract_observability(verbose=verbose) + else: + raise typer.BadParameter( + f"Unsupported observe provider: {observe}. " + "Choose one of: langfuse, langextract." 
+ ) # Determine output mode if state.quiet: @@ -278,6 +326,7 @@ def register_commands(): from .commands.flow import app as flow_app from .commands.unified import app as unified_app from .commands.langfuse import app as langfuse_app + from .commands.langextract import app as langextract_app from .commands.port import app as port_app from .commands.managed import app as managed_app from .commands.up import app as up_app @@ -465,6 +514,7 @@ def app_cmd( app.add_typer(flow_app, name="flow", help="Visual workflow builder (Langflow)") app.add_typer(unified_app, name="dashboard", help="🌟 Unified Dashboard (Flow + Claw + UI)") app.add_typer(langfuse_app, name="langfuse", help="🔍 Langfuse observability platform") + app.add_typer(langextract_app, name="langextract", help="🧠 Langextract visual trace layer") app.add_typer(port_app, name="port", help="🔌 Manage port usage and resolve conflicts") app.add_typer(up_app, name="up", help="🚀 Start unified PraisonAI stack (Langfuse + Langflow)") diff --git a/src/praisonai/praisonai/cli/commands/__init__.py b/src/praisonai/praisonai/cli/commands/__init__.py index f3b446754..c7addf4b8 100644 --- a/src/praisonai/praisonai/cli/commands/__init__.py +++ b/src/praisonai/praisonai/cli/commands/__init__.py @@ -27,6 +27,7 @@ 'examples_app', 'replay_app', 'github_app', + 'langextract_app', ] @@ -92,4 +93,7 @@ def __getattr__(name: str): elif name == 'github_app': from .github import app as github_app return github_app + elif name == 'langextract_app': + from .langextract import app as langextract_app + return langextract_app raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/src/praisonai/praisonai/cli/commands/langextract.py b/src/praisonai/praisonai/cli/commands/langextract.py new file mode 100644 index 000000000..57925098f --- /dev/null +++ b/src/praisonai/praisonai/cli/commands/langextract.py @@ -0,0 +1,106 @@ +""" +PraisonAI Langextract Commands. 
+ +CLI commands for rendering PraisonAI traces with langextract: +- `praisonai langextract view` - render existing JSONL to HTML +- `praisonai langextract render` - run workflow with langextract observability +""" + +import typer +import webbrowser +from pathlib import Path +from typing import Optional + +app = typer.Typer(name="langextract", help="Render PraisonAI traces with langextract.") + + +@app.command(name="view") +def view( + jsonl_path: Path = typer.Argument(..., help="Path to annotated-documents JSONL"), + output_html: Path = typer.Option("trace.html", "--output", "-o", help="Output HTML file path"), + no_open: bool = typer.Option(False, "--no-open", help="Don't open HTML in browser"), +): + """Render an existing annotated-documents JSONL to an interactive HTML.""" + try: + import langextract as lx # type: ignore + except ImportError: + typer.echo("Error: langextract is not installed. Install with: pip install 'praisonai[langextract]'", err=True) + raise typer.Exit(1) + + if not jsonl_path.exists(): + typer.echo(f"Error: JSONL file not found: {jsonl_path}", err=True) + raise typer.Exit(1) + + try: + html = lx.visualize(str(jsonl_path)) + html_text = html.data if hasattr(html, "data") else html + output_html.write_text(html_text, encoding="utf-8") + typer.echo(f"✅ Wrote {output_html}") + + if not no_open: + webbrowser.open(f"file://{output_html.resolve()}") + except Exception as e: + typer.echo(f"Error: Failed to render HTML: {e}", err=True) + raise typer.Exit(1) + + +@app.command(name="render") +def render( + yaml_path: Path = typer.Argument(..., help="PraisonAI YAML workflow"), + output_html: Path = typer.Option("workflow.html", "--output", "-o", help="Output HTML file path"), + no_open: bool = typer.Option(False, "--no-open", help="Don't open HTML in browser"), + api_url: Optional[str] = typer.Option(None, "--api-url", help="API URL (if using remote API)"), +): + """Run a workflow end-to-end with LangextractSink attached, then open the HTML.""" + 
try: + import langextract # noqa: F401 — probe optional dep early for clear error + from praisonai.observability import LangextractSink, LangextractSinkConfig + from praisonaiagents.trace.protocol import TraceEmitter, set_default_emitter + from praisonai import PraisonAI + except ImportError as e: + typer.echo( + f"Error: Missing dependencies: {e}. " + "Install langextract with: pip install 'praisonai[langextract]'", + err=True, + ) + raise typer.Exit(1) from e + + if not yaml_path.exists(): + typer.echo(f"Error: YAML file not found: {yaml_path}", err=True) + raise typer.Exit(1) + + # Set up langextract observability + config = LangextractSinkConfig( + output_path=str(output_html), + auto_open=not no_open, + ) + sink = LangextractSink(config=config) + + # Set up trace emitter for the duration of the run + emitter = TraceEmitter(sink=sink, enabled=True) + set_default_emitter(emitter) + + try: + # Run the workflow + praison = PraisonAI(agent_file=str(yaml_path)) + if api_url: + praison.api_url = api_url.rstrip("/") + + result = praison.main() + typer.echo(result) + + except Exception as e: + typer.echo(f"Error: Workflow failed: {e}", err=True) + raise typer.Exit(1) from e + finally: + # Ensure sink is closed even if workflow fails + sink.close() + + if output_html.exists(): + typer.echo(f"✅ Trace rendered: {output_html}") + else: + typer.echo( + f"Error: Trace was not rendered to {output_html} (see logs for details)", + err=True, + ) + raise typer.Exit(1) \ No newline at end of file diff --git a/src/praisonai/praisonai/observability/__init__.py b/src/praisonai/praisonai/observability/__init__.py index c9c789680..265445fb1 100644 --- a/src/praisonai/praisonai/observability/__init__.py +++ b/src/praisonai/praisonai/observability/__init__.py @@ -9,6 +9,7 @@ if TYPE_CHECKING: from .langfuse import LangfuseSink, LangfuseSinkConfig + from .langextract import LangextractSink, LangextractSinkConfig __all__ = [] @@ -20,5 +21,11 @@ def __getattr__(name: str): elif name == 
"LangfuseSinkConfig": from .langfuse import LangfuseSinkConfig return LangfuseSinkConfig + elif name == "LangextractSink": + from .langextract import LangextractSink + return LangextractSink + elif name == "LangextractSinkConfig": + from .langextract import LangextractSinkConfig + return LangextractSinkConfig + else: + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") \ No newline at end of file diff --git a/src/praisonai/praisonai/observability/langextract.py b/src/praisonai/praisonai/observability/langextract.py new file mode 100644 index 000000000..9f43f79db --- /dev/null +++ b/src/praisonai/praisonai/observability/langextract.py @@ -0,0 +1,186 @@ +""" +Langextract TraceSinkProtocol Implementation for PraisonAI. + +Provides LangextractSink adapter that implements TraceSinkProtocol from the core SDK, +producing self-contained interactive HTML visualizations of agent runs grounded in +the original input text. + +Architecture: +- Core SDK (praisonaiagents): Defines TraceSinkProtocol (unchanged) +- Wrapper (praisonai): Implements LangextractSink adapter (this file) +- Pattern: Protocol-driven design per AGENTS.md §4.1 — mirrors LangfuseSink +""" + +from __future__ import annotations +import os +import threading +import webbrowser +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, List, Optional + +from praisonaiagents.trace.protocol import ( + ActionEvent, + ActionEventType, + TraceSinkProtocol, +) + + +@dataclass +class LangextractSinkConfig: + """Configuration for the langextract trace sink.""" + output_path: str = "praisonai-trace.html" + jsonl_path: Optional[str] = None # derived from output_path if None + document_id: str = "praisonai-run" + auto_open: bool = False # open HTML in browser on close() + include_llm_content: bool = True # include response text in attributes + include_tool_args: bool = True + enabled: bool = True + + +class LangextractSink: + """ + Implements `TraceSinkProtocol` by 
accumulating ActionEvents and, on `close()`, + rendering them as a langextract AnnotatedDocument + interactive HTML. + + Grounding strategy: + - We record the first AGENT_START's `metadata["input"]` as the source text. + - OUTPUT events produce extractions grounded against the agent's output. + - TOOL_* events produce ungrounded extractions (char_interval=None) whose + `attributes` carry the tool name, args summary, duration, status. + - AGENT_START/END bracket a run; we emit a single parent "agent" extraction + spanning the whole document for overview. + """ + + __slots__ = ("_config", "_lock", "_events", "_source_text", "_closed") + + def __init__(self, config: Optional[LangextractSinkConfig] = None) -> None: + self._config = config or LangextractSinkConfig() + self._lock = threading.Lock() + self._events: List[ActionEvent] = [] + self._source_text: Optional[str] = None + self._closed = False + + # ---- TraceSinkProtocol ------------------------------------------------- + + def emit(self, event: ActionEvent) -> None: + if not self._config.enabled or self._closed: + return + with self._lock: + # Capture source text from first AGENT_START + if ( + self._source_text is None + and event.event_type == ActionEventType.AGENT_START.value + and event.metadata + ): + self._source_text = event.metadata.get("input") or "" + self._events.append(event) + + def flush(self) -> None: + pass # no-op; HTML is built on close() + + def close(self) -> None: + with self._lock: + if self._closed: + return + self._closed = True + try: + self._render() + except Exception: + # Observability must never break the agent + import logging + logging.getLogger(__name__).exception("LangextractSink render failed") + + # ---- Rendering --------------------------------------------------------- + + def _render(self) -> None: + # Lazy import โ€” langextract is optional + try: + import langextract as lx # type: ignore + except ImportError as err: + raise ImportError( + "langextract is not installed. 
Install with: pip install 'praisonai[langextract]'" + ) from err + + # Capture snapshot of events under lock to ensure thread safety + with self._lock: + events = self._events[:] + source = self._source_text or "" + + # Skip rendering if no events were recorded + if not events: + return + + extractions = list(self._events_to_extractions(lx, source, events)) + doc = lx.data.AnnotatedDocument( + document_id=self._config.document_id, + text=source, + extractions=extractions, + ) + + jsonl = self._config.jsonl_path or (Path(self._config.output_path).with_suffix(".jsonl").as_posix()) + Path(jsonl).parent.mkdir(parents=True, exist_ok=True) + lx.io.save_annotated_documents([doc], output_name=os.path.basename(jsonl), output_dir=os.path.dirname(jsonl) or ".") + + html = lx.visualize(jsonl) + html_text = html.data if hasattr(html, "data") else html + + # Create parent directory for output path if it doesn't exist + output_path = Path(self._config.output_path) + output_path.parent.mkdir(parents=True, exist_ok=True) + output_path.write_text(html_text, encoding="utf-8") + + if self._config.auto_open: + webbrowser.open(f"file://{output_path.resolve()}") + + def _events_to_extractions(self, lx, source: str, events: List[ActionEvent]): + """Pure mapper: ActionEvent list -> lx.data.Extraction generator.""" + for ev in events: + et = ev.event_type + attrs: Dict[str, Any] = { + "agent_name": ev.agent_name, + "duration_ms": ev.duration_ms, + "status": ev.status, + } + if et == ActionEventType.AGENT_START.value: + yield lx.data.Extraction( + extraction_class="agent_run", + extraction_text=(source[:200] if source else ev.agent_name or "agent"), + attributes={**attrs, "kind": "start"}, + ) + elif et == ActionEventType.TOOL_START.value: + yield lx.data.Extraction( + extraction_class="tool_call", + extraction_text=ev.tool_name or "tool", + attributes={ + **attrs, + "tool_name": ev.tool_name, + "tool_args": ev.tool_args if self._config.include_tool_args else None, + }, + ) + elif et == 
ActionEventType.TOOL_END.value: + yield lx.data.Extraction( + extraction_class="tool_result", + extraction_text=ev.tool_result_summary or "(empty)", + attributes={**attrs, "tool_name": ev.tool_name}, + ) + elif et == ActionEventType.OUTPUT.value: + # Fix: OUTPUT events store text in tool_result_summary, not metadata['content'] + output_text = ( + ev.tool_result_summary + or (ev.metadata or {}).get("output") + or (ev.metadata or {}).get("content") + or "" + ) + yield lx.data.Extraction( + extraction_class="final_output", + extraction_text=output_text[:1000], + attributes=attrs, + ) + elif et == ActionEventType.ERROR.value: + yield lx.data.Extraction( + extraction_class="error", + extraction_text=ev.error_message or "error", + attributes=attrs, + ) + # AGENT_END is summary-only — skip for now; could produce run stats extraction \ No newline at end of file diff --git a/src/praisonai/pyproject.toml b/src/praisonai/pyproject.toml index 955f5828a..0aacb9af8 100644 --- a/src/praisonai/pyproject.toml +++ b/src/praisonai/pyproject.toml @@ -45,6 +45,7 @@ os = [ ] agentops = ["agentops>=0.3.12"] langfuse = ["langfuse>=3.0.0"] +langextract = ["langextract>=1.0.0"] chat = [ "chainlit>=2.8.5,<=2.9.4", "aiosqlite>=0.20.0", diff --git a/src/praisonai/tests/unit/test_langextract_sink.py b/src/praisonai/tests/unit/test_langextract_sink.py new file mode 100644 index 000000000..d95b41803 --- /dev/null +++ b/src/praisonai/tests/unit/test_langextract_sink.py @@ -0,0 +1,346 @@ +""" +Tests for LangextractSink adapter. 
+ +Unit tests for the langextract observability integration, focusing on: +- LangextractSink implements TraceSinkProtocol correctly +- Event-to-extraction mapping is accurate +- HTML output is generated correctly +- Lazy imports work properly +""" + +import pytest +import tempfile +import time +import builtins +from pathlib import Path +from unittest.mock import Mock, patch + +from praisonaiagents.trace.protocol import ActionEvent, ActionEventType + + +_REAL_IMPORT = builtins.__import__ + + +def _import_with_langextract_failure(name, global_vars=None, local_vars=None, fromlist=(), level=0): + """Import hook that fails only for langextract.""" + if name == "langextract": + raise ImportError("No module named 'langextract'") + return _REAL_IMPORT(name, global_vars, local_vars, fromlist, level) + + +@pytest.fixture +def sample_events(): + """Sample ActionEvents for testing.""" + ts = time.time() + return [ + ActionEvent( + event_type=ActionEventType.AGENT_START.value, + timestamp=ts, + agent_name="test_agent", + metadata={"input": "Test input for the agent to process"} + ), + ActionEvent( + event_type=ActionEventType.TOOL_START.value, + timestamp=ts + 1, + agent_name="test_agent", + tool_name="search", + tool_args={"query": "test query"} + ), + ActionEvent( + event_type=ActionEventType.TOOL_END.value, + timestamp=ts + 2, + agent_name="test_agent", + tool_name="search", + duration_ms=100.0, + status="ok", + tool_result_summary="Found 5 results" + ), + ActionEvent( + event_type=ActionEventType.OUTPUT.value, + timestamp=ts + 3, + agent_name="test_agent", + metadata={"content": "Final agent output based on search results"} + ), + ActionEvent( + event_type=ActionEventType.AGENT_END.value, + timestamp=ts + 4, + agent_name="test_agent", + duration_ms=500.0, + status="ok" + ) + ] + + +class TestLangextractSink: + """Test LangextractSink implementation.""" + + def test_lazy_import_without_langextract(self): + """Test that LangextractSink can be imported without langextract 
installed.""" + # This should work even if langextract is not available + from praisonai.observability import LangextractSink, LangextractSinkConfig + + config = LangextractSinkConfig() + sink = LangextractSink(config=config) + + # Basic properties should work + assert sink._config.enabled is True + assert sink._closed is False + + def test_sink_config_defaults(self): + """Test LangextractSinkConfig default values.""" + from praisonai.observability import LangextractSinkConfig + + config = LangextractSinkConfig() + assert config.output_path == "praisonai-trace.html" + assert config.jsonl_path is None + assert config.document_id == "praisonai-run" + assert config.auto_open is False + assert config.include_llm_content is True + assert config.include_tool_args is True + assert config.enabled is True + + def test_sink_implements_protocol(self): + """Test that LangextractSink implements TraceSinkProtocol.""" + from praisonai.observability import LangextractSink + from praisonaiagents.trace.protocol import TraceSinkProtocol + + sink = LangextractSink() + assert isinstance(sink, TraceSinkProtocol) + + # Protocol methods should exist + assert hasattr(sink, 'emit') + assert hasattr(sink, 'flush') + assert hasattr(sink, 'close') + + def test_event_accumulation(self, sample_events): + """Test that events are accumulated correctly.""" + from praisonai.observability import LangextractSink + + sink = LangextractSink() + + # Emit all events + for event in sample_events: + sink.emit(event) + + # Events should be stored + assert len(sink._events) == len(sample_events) + assert sink._source_text == "Test input for the agent to process" + + def test_disabled_sink_ignores_events(self, sample_events): + """Test that disabled sink ignores all events.""" + from praisonai.observability import LangextractSink, LangextractSinkConfig + + config = LangextractSinkConfig(enabled=False) + sink = LangextractSink(config=config) + + # Emit events + for event in sample_events: + sink.emit(event) + + 
# No events should be stored + assert len(sink._events) == 0 + assert sink._source_text is None + + def test_events_to_extractions_mapping(self, sample_events): + """Test that ActionEvents are mapped to langextract extractions correctly.""" + from praisonai.observability import LangextractSink + + # Mock langextract module + mock_lx = Mock() + mock_extraction = Mock() + mock_lx.data.Extraction = Mock(return_value=mock_extraction) + + sink = LangextractSink() + for event in sample_events: + sink.emit(event) + + # Test the mapping function + extractions = list(sink._events_to_extractions(mock_lx, "Test input text", sink._events)) + + # AGENT_END is intentionally skipped in current implementation + assert len(extractions) == 4 + + # Check that each event type creates an extraction + assert mock_lx.data.Extraction.call_count == 4 + + @patch('praisonai.observability.langextract.webbrowser.open') + def test_render_with_mock_langextract(self, mock_browser, sample_events): + """Test rendering with mocked langextract.""" + import sys + from praisonai.observability import LangextractSink, LangextractSinkConfig + + # Mock langextract module + mock_lx = Mock() + mock_doc = Mock() + mock_html = Mock() + mock_html.data = "Test HTML content" + + mock_lx.data.AnnotatedDocument = Mock(return_value=mock_doc) + mock_lx.data.Extraction = Mock() + mock_lx.io.save_annotated_documents = Mock() + mock_lx.visualize = Mock(return_value=mock_html) + + with tempfile.TemporaryDirectory() as temp_dir: + output_path = Path(temp_dir) / "test.html" + config = LangextractSinkConfig( + output_path=str(output_path), + auto_open=True + ) + sink = LangextractSink(config=config) + + # Emit events + for event in sample_events: + sink.emit(event) + + # Mock the langextract import directly + with patch.dict(sys.modules, {"langextract": mock_lx}): + sink.close() + + # Verify HTML file was written + assert output_path.exists() + content = output_path.read_text() + assert "Test HTML content" in content + + # 
Verify browser was opened + mock_browser.assert_called_once() + + def test_close_idempotent(self, sample_events): + """Test that close() can be called multiple times safely.""" + from praisonai.observability import LangextractSink, LangextractSinkConfig + + with tempfile.TemporaryDirectory() as temp_dir: + output_path = Path(temp_dir) / "trace.html" + sink = LangextractSink(config=LangextractSinkConfig(output_path=str(output_path))) + for event in sample_events: + sink.emit(event) + + # Mock langextract to avoid import error + mock_lx = Mock() + with patch.dict("sys.modules", {"langextract": mock_lx}): + mock_lx.data.AnnotatedDocument = Mock() + mock_lx.data.Extraction = Mock() + mock_lx.io.save_annotated_documents = Mock() + mock_lx.visualize = Mock(return_value=Mock(data="")) + + # First close should work + sink.close() + assert sink._closed is True + + # Second close should be no-op + sink.close() + assert sink._closed is True + + def test_flush_no_op(self): + """Test that flush() is a no-op.""" + from praisonai.observability import LangextractSink + + sink = LangextractSink() + # Should not raise any exception + sink.flush() + + def test_import_error_handling(self, sample_events): + """Test graceful handling of langextract import error.""" + from praisonai.observability import LangextractSink + + sink = LangextractSink() + for event in sample_events: + sink.emit(event) + + # Force ImportError for optional dependency + with patch("builtins.__import__", side_effect=_import_with_langextract_failure): + # Should not raise, just log warning + sink.close() + assert sink._closed is True + + +class TestLangextractCLI: + """Test langextract CLI commands.""" + + @pytest.mark.parametrize("command", ["view", "render"]) + def test_cli_commands_exist(self, command): + """Test that CLI commands are registered.""" + from praisonai.cli.commands.langextract import app + + # Check that the command exists + registered = app.registered_commands + command_objects = 
registered.values() if hasattr(registered, "values") else registered + commands = {cmd.name for cmd in command_objects} + assert command in commands + + def test_view_command_missing_file(self): + """Test view command with missing JSONL file.""" + # First mock the imports to avoid ImportError + mock_lx = Mock() + with patch.dict("sys.modules", {"langextract": mock_lx}): + from praisonai.cli.commands.langextract import view + import typer + + with pytest.raises(typer.Exit): + view(Path("/nonexistent/file.jsonl")) + + def test_render_command_missing_yaml(self): + """Test render command with missing YAML file.""" + # First mock the imports to avoid ImportError + mock_lx = Mock() + mock_observability = Mock() + mock_praisonai = Mock() + with patch.dict("sys.modules", { + "langextract": mock_lx, + "praisonai.observability": mock_observability, + "praisonai": mock_praisonai + }): + from praisonai.cli.commands.langextract import render + import typer + + with pytest.raises(typer.Exit): + render(Path("/nonexistent/workflow.yaml")) + + +class TestLangextractObservabilitySetup: + """Test CLI observability setup.""" + + def test_observe_langextract_calls_setup(self): + """Test that --observe langextract calls the setup function.""" + import praisonai.cli.app as cli_app + mock_ctx = Mock(invoked_subcommand="test") + + with patch.object(cli_app, "_setup_langextract_observability") as mock_setup: + # Mock all the required arguments with defaults + cli_app.main_callback( + ctx=mock_ctx, + observe="langextract", + version=False, + output_format=cli_app.OutputFormat.text, + json_output=False, + no_color=False, + quiet=False, + verbose=False, + screen_reader=False + ) + + # Setup should have been called + mock_setup.assert_called_once() + + def test_observe_invalid_provider_error(self): + """Test that invalid observe provider raises error.""" + import typer + import praisonai.cli.app as cli_app + mock_ctx = Mock(invoked_subcommand="test") + + with patch('sys.argv', ['praisonai', 
'--observe', 'invalid-provider']): + with pytest.raises(typer.BadParameter, match="Unsupported observe provider"): + cli_app.main_callback( + ctx=mock_ctx, + observe="invalid-provider", + version=False, + output_format=cli_app.OutputFormat.text, + json_output=False, + no_color=False, + quiet=False, + verbose=False, + screen_reader=False + ) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"])