diff --git a/.gitignore b/.gitignore index 0596fac50..9d8b98edb 100644 --- a/.gitignore +++ b/.gitignore @@ -34,8 +34,6 @@ share/python-wheels/ MANIFEST # PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec @@ -62,23 +60,10 @@ cover/ *.mo *.pot -# Crew stuff: -demo_stands/ - -# Django stuff: +# Logs *.log -local_settings.py -db.sqlite3 -db.sqlite3-journal -db/*sqlite3 -*_stand/ backend/logs/* -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy +backend/app/logs # Sphinx documentation docs/_build/ @@ -95,42 +80,14 @@ profile_default/ ipython_config.py # pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: # .python-version -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - # pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide .pdm.toml -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +# PEP 582 __pypackages__/ -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - # Environments *.env .env @@ -138,17 +95,13 @@ celerybeat.pid # env.* 会误伤 backend/alembic/env.py,故只忽略根目录下的 env.* /env.* !frontend/lib/core/config/env.ts # 允许前端配置文件 -src/ENV/ venv/ ENV/ env/ env.bak/ venv.bak/ -.django_venv -.crew_venv *venv venvs/ -run_program/manager.env !src/.env !src/debug.env @@ -183,10 +136,6 @@ dmypy.json cython_debug/ # PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
.idea/ # VSCode @@ -195,8 +144,6 @@ cython_debug/ # Logging files log.txt .telemetry_log/ - -backend/langgraph_app.* backend/app/logs *.jsonl @@ -221,17 +168,11 @@ api_keys.json .secrets.baseline.scan.json .secrets.baseline.tmp -# Additional tool -additional_tools/ -make_scripts/backups - # Savefiles **/savefiles # FRONTEND GIT ignore - - # Compiled output frontend/dist frontend/tmp @@ -281,13 +222,7 @@ frontend/typings frontend/.DS_Store Thumbs.db -executions -openai-realtime-console -src/django_app/staticfiles/* - - -#knowledge data -knowledge/graph_data/ +/executions # IDE/Editor folders .claude/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c531b17ce..fb5477fb1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,8 +29,7 @@ repos: - id: ruff-format files: ^backend/ - # Backend - Ruff check (强制检查,不允许错误) - # 注意:此 hook 会检查整个 backend 目录,确保没有 lint 错误 + # Backend - Ruff strict check - repo: local hooks: - id: backend-ruff-check @@ -39,22 +38,10 @@ repos: language: system files: ^backend/.*\.py$ pass_filenames: false - always_run: false # 只在有 Python 文件更改时运行 - stages: [commit] + always_run: false + stages: [pre-commit] - # Python - Type checking (mypy runs in Backend CI job only; skipped in pre-commit) - # - repo: https://github.com/pre-commit/mirrors-mypy - # rev: v1.13.0 - # hooks: - # - id: mypy - # files: ^backend/app/ - # args: [--ignore-missing-imports] - # additional_dependencies: - # - types-requests - # - types-PyYAML - - # Frontend - ESLint (强制检查,不允许错误) - # 注意:此 hook 会检查整个 frontend 目录,确保没有 lint 错误 + # Frontend - ESLint + TypeScript type-check + Prettier format check - repo: local hooks: - id: frontend-lint @@ -63,8 +50,26 @@ repos: language: system files: ^frontend/.*\.(ts|tsx|js|jsx)$ pass_filenames: false - always_run: false # 只在有前端文件更改时运行 - stages: [commit] + always_run: false + stages: [pre-commit] + + - id: frontend-type-check + name: Frontend TypeScript Check + entry: bash -c 'cd frontend && bun run type-check || exit 1' + language: system + files: ^frontend/.*\.(ts|tsx)$ + pass_filenames: false + always_run: false + stages: [pre-commit] + + - id: frontend-format-check + name: Frontend Prettier Check + entry: bash -c 'cd frontend && bun run format:check || exit 1' + language: system + files: ^frontend/.*\.(ts|tsx|js|jsx|json|css|md)$ + pass_filenames: false + always_run: false + stages: [pre-commit] # CI configuration ci: diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 29640ebe7..5b91822e6 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -28,8 +28,8 @@ This document provides detailed instructions for setting up and running the JoyS Using Docker (recommended): ```bash -cd backend/docker -./start.sh +cd deploy +docker compose -f docker-compose-middleware.yml up -d ``` Or manually start PostgreSQL and Redis on your system. 
diff --git a/README.md b/README.md index 32cf6a2e1..2c52b49c7 100644 --- a/README.md +++ b/README.md @@ -220,11 +220,15 @@ All modes support remote deployment scenarios: **Key design principles:** +- **Single source of truth for value domains** — `core/contracts/` defines every canonical value (status, error code, trigger source) as Literal types + set constants; no magic strings +- **Engine protocol + registry** — all execution engines implement `ExecutionEngine` Protocol; `EngineRegistry` maps `runtime_kind` to engine instances; adding a new engine is a 3-file change +- **Two-phase event bus** — Phase 1 (persistence + state transition) shares a DB transaction and commits atomically; Phase 2 (WebSocket + task sync) fans out in parallel +- **Centralized state machines** — 6 entity state machines (`Agent`, `Version`, `Release`, `Run`, `Execution`, `Task`) with `transition_*()` as the only status-mutation functions +- **Normalized error system** — `AppError.to_payload()` produces a canonical `ErrorDescriptor` (`{code, message, data, source, retryable, user_action}`) consumed identically across HTTP, WebSocket, SSE, and DB +- **Port/Adapter boundaries** — `core/ports/` defines Protocol interfaces; `services/` provides implementations; `core/` never imports concrete services +- **OTel-backed observation** — `ObservationCollector` injected into `ExecutionContext`; spans exported to both DB and WebSocket for real-time trace display - **Graph-based execution** — every agent workflow is a stateful LangGraph, enabling pause, resume, and branch -- **Unified Run Center** — Chat, Copilot, and Skill Creator share a single event-sourced run lifecycle (Run → Event → Snapshot) -- **Unified WebSocket layer** — BaseWsClient abstract class; Chat / Run / Notification clients share lifecycle, auth (ws-token), and reconnect logic -- **Full-chain trace_id propagation** — contextvars-based request tracing from HTTP/WS entry through LangGraph to persistence -- **Glass-box observability** — real-time Langfuse tracing of every agent decision and state transition +- **Unified WebSocket layer** — BaseWsClient abstract class; Execution / Notification clients share lifecycle, auth (ws-token), and reconnect logic - **RAII sandbox isolation** — per-user Docker containers with automatic handle release, zero state leakage - **Canonical model identifiers** — full-stack (provider_name, model_name) resolution via ModelService → ModelFactory - **Layered skill system** — skills are versioned units that compose into workflows without coupling @@ -262,6 +266,10 @@ All modes support remote deployment scenarios: | Tag | Feature | What it means | |-----|---------|---------------| +| **NEW** | **Architecture Hardening** | 5-layer execution architecture with Engine Protocol, EngineRegistry, two-phase EventBus, centralized state machines, and Port/Adapter boundaries | +| **NEW** | **Unified Error Contract** | `AppError.to_payload()` produces canonical `ErrorDescriptor` consumed identically across HTTP, WS, SSE, and DB; frontend `ApiError` mirrors the shape with typed `source`/`retryable`/`userAction` | +| **NEW** | **State Machine Centralization** | 6 entity state machines (`Agent`, `Version`, `Release`, `Run`, `Execution`, `Task`) with `transition_*()` as the only status-mutation functions | +| **NEW** | **Observation Tracing** | OTel-backed `ObservationCollector` injected into `ExecutionContext`; spans exported to DB and WebSocket for real-time trace display | | **NEW** | **Run Center Architecture** | Chat & Copilot fully integrated 
into Run Center — run details, session recovery, and live event replay on page refresh | | **NEW** | **Dark Mode & Preferences** | System / Light / Dark theme switching; redesigned profile page with language & theme preferences | | **NEW** | **Unified WebSocket Layer** | BaseWsClient abstract class — Chat, Run, and Notification clients share lifecycle, auth (ws-token), and reconnect logic | diff --git a/README_CN.md b/README_CN.md index e9e4219c6..5b69d34b1 100644 --- a/README_CN.md +++ b/README_CN.md @@ -220,11 +220,15 @@ **核心设计原则:** +- **值域唯一来源** —— `core/contracts/` 以 Literal 类型 + set 常量定义所有规范化值(状态、错误码、触发来源),杜绝魔术字符串 +- **引擎协议 + 注册表** —— 所有执行引擎实现 `ExecutionEngine` Protocol;`EngineRegistry` 将 `runtime_kind` 映射到引擎实例;添加新引擎只需改 3 个文件 +- **两阶段事件总线** —— 第 1 阶段(持久化 + 状态变迁)共享 DB 事务,原子提交;第 2 阶段(WebSocket + 任务同步)并行扇出 +- **集中化状态机** —— 6 个实体状态机(`Agent`、`Version`、`Release`、`Run`、`Execution`、`Task`),`transition_*()` 是唯一的状态修改入口 +- **规范化错误系统** —— `AppError.to_payload()` 输出规范的 `ErrorDescriptor`(`{code, message, data, source, retryable, user_action}`),HTTP/WS/SSE/DB 各传输路径一致消费 +- **端口/适配器边界** —— `core/ports/` 定义 Protocol 接口;`services/` 提供实现;`core/` 不导入具体服务 +- **OTel 观测追踪** —— `ObservationCollector` 注入 `ExecutionContext`;span 导出到 DB 和 WebSocket,支持实时追踪展示 - **图式执行** —— 每个 Agent 工作流都是有状态的 LangGraph,支持暂停、恢复与分支 -- **统一 Run Center** —— Chat、Copilot、Skill Creator 共享同一套事件溯源运行生命周期(Run → Event → Snapshot) -- **统一 WebSocket 层** —— BaseWsClient 抽象基类;Chat / Run / Notification 三端客户端共享生命周期、认证(ws-token)与重连逻辑 -- **trace_id 全链路追踪** —— 基于 contextvars 的请求追踪,从 HTTP/WS 入口贯穿 LangGraph 直至持久化 -- **白盒可观测性** —— 基于 Langfuse 实时追踪每一步 Agent 决策与状态流转 +- **统一 WebSocket 层** —— BaseWsClient 抽象基类;Execution / Notification 客户端共享生命周期、认证与重连逻辑 - **RAII 沙箱隔离** —— 用户级 Docker 容器,句柄自动释放,会话间零状态泄露 - **规范化模型标识** —— 全栈统一 (provider_name, model_name) 解析路径:ModelService → ModelFactory - **分层技能体系** —— 技能是版本化单元,可自由组合成工作流,互不耦合 @@ -262,6 +266,10 @@ | 标签 | 功能 | 一句话说明 | |------|------|-----------| +| **NEW** | **架构硬化** | 5 层执行架构:引擎协议 + EngineRegistry + 两阶段事件总线 + 集中化状态机 + 端口/适配器边界 | +| **NEW** | **统一错误契约** | `AppError.to_payload()` 输出规范 `ErrorDescriptor`,HTTP/WS/SSE/DB 一致消费;前端 `ApiError` 镜像类型化 `source`/`retryable`/`userAction` | +| **NEW** | **状态机集中化** | 6 个实体状态机(Agent、Version、Release、Run、Execution、Task),`transition_*()` 为唯一状态修改入口 | +| **NEW** | **观测追踪** | 基于 OTel 的 `ObservationCollector` 注入 ExecutionContext;span 导出到 DB 和 WebSocket,支持实时追踪 | | **NEW** | **Run Center 架构** | Chat 与 Copilot 全面迁入 Run Center——支持运行详情查看、会话恢复、页面刷新后实时事件回放 | | **NEW** | **深色模式与偏好设置** | 系统/浅色/深色三种主题切换;重新设计个人资料页面,新增语言与主题偏好 | | **NEW** | **统一 WebSocket 层** | 引入 BaseWsClient 抽象基类——Chat、Run、Notification 三端客户端共享生命周期、认证(ws-token)与重连逻辑 | diff --git a/backend/README.md b/backend/README.md index 5e52191c6..7de1b066e 100644 --- a/backend/README.md +++ b/backend/README.md @@ -48,6 +48,38 @@ cd backend uv run uvicorn app.main:app --reload --host 0.0.0.0 --port 8000 ``` +## 项目结构 + +``` +app/ +├── api/v1/ # REST 路由模块 +├── common/ +│ └── app_errors.py # 统一异常层次 + ErrorDescriptor 序列化 +├── core/ +│ ├── contracts/ # 值域唯一来源(DefinitionKind、RuntimeKind、ErrorCode、ErrorSource 等) +│ ├── engine/ # 执行引擎协议、注册表、4 个内建引擎(CLI/Graph/Code/Copilot) +│ ├── events/ # 两阶段事件总线(ExecutionEventBus)+ 4 个内建订阅者 +│ ├── state_machines/ # 集中化状态机(6 个实体的转换规则) +│ ├── observation/ # OTel-backed 观测追踪(注入 ExecutionContext) +│ ├── ports/ # Protocol 接口,解耦 core/ 与 services/ +│ ├── agent/ # CLI agent 后端(claude_code、codex、openclaw) +│ ├── copilot/ # Copilot 服务 +│ ├── graph/ # DeepAgents 图构建器 + 代码执行器 +│ ├── skill/ # 技能系统(渐进式加载) +│ ├── model/ # 模型提供商 + 凭据管理 
+│ ├── tools/ # 工具解析器 + MCP 集成 +│ └── a2a/ # Agent-to-Agent 协议 +├── models/ # SQLAlchemy ORM 模型 +├── repositories/ # 数据访问层 +├── schemas/ # Pydantic 请求/响应 Schema +├── services/ # 服务层(DispatchService、ExecutionOrchestrator、40+ 模块) +├── websocket/ # WebSocket 处理器(/ws/executions、/ws/notifications、/ws/openclaw) +├── templates/ # 邮件模板(Jinja2) +└── utils/ # 共享工具 +``` + +> 完整架构文档:[`docs/ARCHITECTURE.md`](../docs/ARCHITECTURE.md) | [中文版](../docs/ARCHITECTURE_CN.md) + ## API 文档 - Swagger UI: http://localhost:8000/docs diff --git a/backend/alembic/versions/20260415_000000_b4b3b2b1b0a9_add_mission_agent_execution_tables.py b/backend/alembic/versions/20260415_000000_b4b3b2b1b0a9_add_mission_agent_execution_tables.py new file mode 100644 index 000000000..f02456dbb --- /dev/null +++ b/backend/alembic/versions/20260415_000000_b4b3b2b1b0a9_add_mission_agent_execution_tables.py @@ -0,0 +1,274 @@ +"""add_mission_agent_execution_tables + +Revision ID: b4b3b2b1b0a9 +Revises: 0f7082711f20 +Create Date: 2026-04-15 00:00:00.000000 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "b4b3b2b1b0a9" +down_revision: Union[str, None] = "0f7082711f20" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # --- Enum types --- + op.execute(""" + DO $$ BEGIN + CREATE TYPE missionstatus AS ENUM ('backlog','todo','in_progress','in_review','done','blocked','cancelled'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + op.execute(""" + DO $$ BEGIN + CREATE TYPE missionpriority AS ENUM ('none','low','medium','high','urgent'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + op.execute(""" + DO $$ BEGIN + CREATE TYPE agentstatus AS ENUM ('idle','working','blocked','error','offline'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + op.execute(""" + DO $$ BEGIN + CREATE TYPE missionexecutionstatus AS ENUM ('queued','dispatched','running','interrupt_wait','approval_wait','completed','failed','cancelled'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + op.execute(""" + DO $$ BEGIN + CREATE TYPE executionsource AS ENUM ('mission','chat','graph','coordinator','api'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + + # --- missions --- + op.create_table( + "missions", + sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True), + sa.Column( + "workspace_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("workspaces.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("title", sa.String(500), nullable=False), + sa.Column("description", sa.Text(), nullable=True), + sa.Column("objective", sa.Text(), nullable=True), + sa.Column( + "status", + postgresql.ENUM( + "backlog", + "todo", + "in_progress", + "in_review", + "done", + "blocked", + "cancelled", + name="missionstatus", + create_type=False, + ), + nullable=False, + server_default="backlog", + ), + sa.Column( + "priority", + postgresql.ENUM("none", "low", "medium", "high", "urgent", name="missionpriority", create_type=False), + nullable=False, + server_default="none", + ), + sa.Column("assignee_type", sa.String(50), nullable=True), + sa.Column("assignee_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("creator_id", sa.String(255), sa.ForeignKey("user.id", ondelete="CASCADE"), nullable=False), + sa.Column( + "parent_mission_id", + 
postgresql.UUID(as_uuid=True), + sa.ForeignKey("missions.id", ondelete="SET NULL"), + nullable=True, + ), + sa.Column("current_execution_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("due_date", sa.DateTime(timezone=True), nullable=True), + sa.Column("position", sa.Float(), nullable=False, server_default="0.0"), + sa.Column("tags", postgresql.JSONB(), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + op.create_index("missions_workspace_status_idx", "missions", ["workspace_id", "status"]) + op.create_index("missions_assignee_idx", "missions", ["assignee_type", "assignee_id"]) + op.create_index("missions_creator_idx", "missions", ["creator_id", "created_at"]) + + # --- agent_profiles --- + op.create_table( + "agent_profiles", + sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True), + sa.Column( + "workspace_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("workspaces.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("avatar", sa.String(500), nullable=True), + sa.Column("description", sa.Text(), nullable=True), + sa.Column("runtime_type", sa.String(50), nullable=False), + sa.Column( + "status", + postgresql.ENUM("idle", "working", "blocked", "error", "offline", name="agentstatus", create_type=False), + nullable=False, + server_default="offline", + ), + sa.Column("max_concurrent_tasks", sa.Integer(), nullable=False, server_default="1"), + sa.Column("skill_ids", postgresql.JSONB(), nullable=True), + sa.Column("instructions", sa.Text(), nullable=True), + sa.Column("custom_env", postgresql.JSONB(), nullable=True), + sa.Column("runtime_config", postgresql.JSONB(), nullable=True), + sa.Column("visibility", sa.String(50), nullable=False, server_default="workspace"), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + op.create_index("agent_profiles_workspace_idx", "agent_profiles", ["workspace_id"]) + op.create_index("agent_profiles_workspace_status_idx", "agent_profiles", ["workspace_id", "status"]) + + # --- executions --- + op.create_table( + "executions", + sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True), + sa.Column( + "workspace_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("workspaces.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("user_id", sa.String(255), sa.ForeignKey("user.id", ondelete="CASCADE"), nullable=False), + sa.Column( + "source", + postgresql.ENUM( + "mission", "chat", "graph", "coordinator", "api", name="executionsource", create_type=False + ), + nullable=False, + ), + sa.Column("source_id", sa.String(255), nullable=True), + sa.Column( + "status", + postgresql.ENUM( + "queued", + "dispatched", + "running", + "interrupt_wait", + "approval_wait", + "completed", + "failed", + "cancelled", + name="missionexecutionstatus", + create_type=False, + ), + nullable=False, + server_default="queued", + ), + sa.Column("title", sa.String(500), nullable=True), + sa.Column( + "mission_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("missions.id", ondelete="SET NULL"), + nullable=True, + ), + sa.Column( + "agent_profile_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("agent_profiles.id", ondelete="SET NULL"), + 
nullable=True, + ), + sa.Column( + "parent_execution_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("executions.id", ondelete="SET NULL"), + nullable=True, + ), + sa.Column("result_summary", postgresql.JSONB(), nullable=True), + sa.Column("error_code", sa.String(100), nullable=True), + sa.Column("error_message", sa.Text(), nullable=True), + sa.Column("runtime_type", sa.String(50), nullable=False), + sa.Column("runtime_config", postgresql.JSONB(), nullable=True), + sa.Column("container_id", sa.String(255), nullable=True), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("finished_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("last_heartbeat_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("last_seq", sa.BigInteger(), nullable=False, server_default="0"), + sa.Column("prior_session_id", sa.String(255), nullable=True), + sa.Column("session_id", sa.String(255), nullable=True), + sa.Column("work_dir", sa.String(500), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + op.create_index("executions_workspace_status_idx", "executions", ["workspace_id", "status"]) + op.create_index("executions_mission_idx", "executions", ["mission_id"]) + op.create_index("executions_agent_profile_idx", "executions", ["agent_profile_id"]) + op.create_index("executions_parent_idx", "executions", ["parent_execution_id"]) + op.create_index("executions_user_created_idx", "executions", ["user_id", "created_at"]) + + # --- execution_events --- + op.create_table( + "execution_events", + sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True), + sa.Column( + "execution_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("executions.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("seq", sa.BigInteger(), nullable=False), + sa.Column("event_type", sa.String(100), nullable=False), + sa.Column("payload", postgresql.JSONB(), nullable=False, server_default="{}"), + sa.Column("trace_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("observation_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("parent_observation_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + op.create_unique_constraint("uq_execution_events_exec_seq", "execution_events", ["execution_id", "seq"]) + op.create_index("execution_events_exec_created_idx", "execution_events", ["execution_id", "created_at"]) + + # --- execution_snapshots --- + op.create_table( + "execution_snapshots", + sa.Column( + "execution_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("executions.id", ondelete="CASCADE"), + primary_key=True, + ), + sa.Column("last_seq", sa.BigInteger(), nullable=False, server_default="0"), + sa.Column("status", sa.String(100), nullable=False), + sa.Column("projection", postgresql.JSONB(), nullable=False, server_default="{}"), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + + +def downgrade() -> None: + op.drop_table("execution_snapshots") + op.drop_table("execution_events") + 
op.drop_table("executions") + op.drop_table("agent_profiles") + op.drop_table("missions") + op.execute("DROP TYPE IF EXISTS executionsource") + op.execute("DROP TYPE IF EXISTS missionexecutionstatus") + op.execute("DROP TYPE IF EXISTS agentstatus") + op.execute("DROP TYPE IF EXISTS missionpriority") + op.execute("DROP TYPE IF EXISTS missionstatus") diff --git a/backend/alembic/versions/20260416_000000_c5c4c3c2c1c0_add_mission_comments.py b/backend/alembic/versions/20260416_000000_c5c4c3c2c1c0_add_mission_comments.py new file mode 100644 index 000000000..c39a6113c --- /dev/null +++ b/backend/alembic/versions/20260416_000000_c5c4c3c2c1c0_add_mission_comments.py @@ -0,0 +1,111 @@ +"""add_mission_comments + +Revision ID: c5c4c3c2c1c0 +Revises: b4b3b2b1b0a9 +Create Date: 2026-04-16 00:00:00.000000 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "c5c4c3c2c1c0" +down_revision: Union[str, None] = "b4b3b2b1b0a9" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # --- Enum types --- + op.execute(""" + DO $$ BEGIN + CREATE TYPE commentauthortype AS ENUM ('member','agent'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + op.execute(""" + DO $$ BEGIN + CREATE TYPE commenttype AS ENUM ('comment','status_change','progress_update','system'); + EXCEPTION + WHEN duplicate_object THEN null; + END $$; + """) + + # --- mission_comments --- + op.create_table( + "mission_comments", + sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True), + sa.Column( + "mission_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("missions.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "workspace_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("workspaces.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "author_type", + postgresql.ENUM("member", "agent", name="commentauthortype", create_type=False), + nullable=False, + ), + sa.Column("author_id", sa.String(255), nullable=False), + sa.Column("content", sa.Text(), nullable=False), + sa.Column( + "type", + postgresql.ENUM( + "comment", "status_change", "progress_update", "system", name="commenttype", create_type=False + ), + nullable=False, + server_default="comment", + ), + sa.Column( + "parent_comment_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("mission_comments.id", ondelete="SET NULL"), + nullable=True, + ), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + op.create_index("mission_comments_mission_created_idx", "mission_comments", ["mission_id", "created_at"]) + op.create_index("mission_comments_workspace_idx", "mission_comments", ["workspace_id"]) + op.create_index("mission_comments_author_idx", "mission_comments", ["author_type", "author_id"]) + op.create_index("mission_comments_parent_idx", "mission_comments", ["parent_comment_id"]) + + # --- Add trigger_comment_id to executions --- + op.add_column( + "executions", + sa.Column( + "trigger_comment_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey("mission_comments.id", ondelete="SET NULL"), + nullable=True, + ), + ) + op.create_index("executions_trigger_comment_idx", "executions", ["trigger_comment_id"]) + + # --- Dedup guard: at most one pending 
execution per (mission, agent) --- + op.execute(""" + CREATE UNIQUE INDEX uq_executions_mission_agent_pending + ON executions (mission_id, agent_profile_id) + WHERE status IN ('queued', 'dispatched'); + """) + + +def downgrade() -> None: + op.execute("DROP INDEX IF EXISTS uq_executions_mission_agent_pending") + op.drop_index("executions_trigger_comment_idx", table_name="executions") + op.drop_column("executions", "trigger_comment_id") + op.drop_table("mission_comments") + op.execute("DROP TYPE IF EXISTS commenttype") + op.execute("DROP TYPE IF EXISTS commentauthortype") diff --git a/backend/alembic/versions/20260418_000000_d6d5d4d3d2d1_remove_mission_blocked_status.py b/backend/alembic/versions/20260418_000000_d6d5d4d3d2d1_remove_mission_blocked_status.py new file mode 100644 index 000000000..05a5d63a8 --- /dev/null +++ b/backend/alembic/versions/20260418_000000_d6d5d4d3d2d1_remove_mission_blocked_status.py @@ -0,0 +1,46 @@ +"""remove_mission_blocked_status + +Revision ID: d6d5d4d3d2d1 +Revises: c5c4c3c2c1c0 +Create Date: 2026-04-18 00:00:00.000000 + +""" + +from typing import Sequence, Union + +from alembic import op + +revision: str = "d6d5d4d3d2d1" +down_revision: Union[str, None] = "c5c4c3c2c1c0" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Move any blocked missions to todo before dropping the enum value + op.execute("UPDATE missions SET status = 'todo' WHERE status = 'blocked'") + + # Drop the default before changing type — PG can't auto-cast the default + op.execute("ALTER TABLE missions ALTER COLUMN status DROP DEFAULT") + + # PostgreSQL cannot DROP a value from an existing enum, so recreate it + op.execute("ALTER TYPE missionstatus RENAME TO missionstatus_old") + op.execute("CREATE TYPE missionstatus AS ENUM ('backlog','todo','in_progress','in_review','done','cancelled')") + op.execute("ALTER TABLE missions ALTER COLUMN status TYPE missionstatus USING status::text::missionstatus") + op.execute("DROP TYPE missionstatus_old") + + # Restore the default + op.execute("ALTER TABLE missions ALTER COLUMN status SET DEFAULT 'backlog'") + + +def downgrade() -> None: + op.execute("ALTER TABLE missions ALTER COLUMN status DROP DEFAULT") + + op.execute("ALTER TYPE missionstatus RENAME TO missionstatus_old") + op.execute( + "CREATE TYPE missionstatus AS ENUM ('backlog','todo','in_progress','in_review','done','blocked','cancelled')" + ) + op.execute("ALTER TABLE missions ALTER COLUMN status TYPE missionstatus USING status::text::missionstatus") + op.execute("DROP TYPE missionstatus_old") + + op.execute("ALTER TABLE missions ALTER COLUMN status SET DEFAULT 'backlog'") diff --git a/backend/alembic/versions/20260418_000001_e7e6e5e4e3e2_add_mission_auto_approve.py b/backend/alembic/versions/20260418_000001_e7e6e5e4e3e2_add_mission_auto_approve.py new file mode 100644 index 000000000..51778181c --- /dev/null +++ b/backend/alembic/versions/20260418_000001_e7e6e5e4e3e2_add_mission_auto_approve.py @@ -0,0 +1,29 @@ +"""add_mission_auto_approve + +Revision ID: e7e6e5e4e3e2 +Revises: d6d5d4d3d2d1 +Create Date: 2026-04-18 00:00:01.000000 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +revision: str = "e7e6e5e4e3e2" +down_revision: Union[str, None] = "d6d5d4d3d2d1" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.add_column( + "missions", + sa.Column("auto_approve", 
sa.Boolean(), nullable=False, server_default=sa.text("false")), + ) + + +def downgrade() -> None: + op.drop_column("missions", "auto_approve") diff --git a/backend/alembic/versions/20260418_000002_f8f7f6f5f4f3_add_current_execution_fk.py b/backend/alembic/versions/20260418_000002_f8f7f6f5f4f3_add_current_execution_fk.py new file mode 100644 index 000000000..3e7a1d876 --- /dev/null +++ b/backend/alembic/versions/20260418_000002_f8f7f6f5f4f3_add_current_execution_fk.py @@ -0,0 +1,31 @@ +"""Add FK constraint on missions.current_execution_id -> executions.id""" + +from typing import Union + +from alembic import op + +revision: str = "f8f7f6f5f4f3" +down_revision: Union[str, None] = "e7e6e5e4e3e2" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.execute(""" + UPDATE missions + SET current_execution_id = NULL + WHERE current_execution_id IS NOT NULL + AND current_execution_id NOT IN (SELECT id FROM executions) + """) + op.create_foreign_key( + "fk_missions_current_execution_id", + "missions", + "executions", + ["current_execution_id"], + ["id"], + ondelete="SET NULL", + ) + + +def downgrade() -> None: + op.drop_constraint("fk_missions_current_execution_id", "missions", type_="foreignkey") diff --git a/backend/alembic/versions/20260418_000003_a9a8a7a6a5a4_drop_source_id_column.py b/backend/alembic/versions/20260418_000003_a9a8a7a6a5a4_drop_source_id_column.py new file mode 100644 index 000000000..9442c8732 --- /dev/null +++ b/backend/alembic/versions/20260418_000003_a9a8a7a6a5a4_drop_source_id_column.py @@ -0,0 +1,20 @@ +"""Drop redundant source_id column from executions table.""" + +from typing import Union + +from alembic import op + +revision: str = "a9a8a7a6a5a4" +down_revision: Union[str, None] = "f8f7f6f5f4f3" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.drop_column("executions", "source_id") + + +def downgrade() -> None: + import sqlalchemy as sa + + op.add_column("executions", sa.Column("source_id", sa.String(255), nullable=True)) diff --git a/backend/alembic/versions/20260421_000000_b1b2b3b4b5b6_create_agents_table.py b/backend/alembic/versions/20260421_000000_b1b2b3b4b5b6_create_agents_table.py new file mode 100644 index 000000000..9b9736c77 --- /dev/null +++ b/backend/alembic/versions/20260421_000000_b1b2b3b4b5b6_create_agents_table.py @@ -0,0 +1,36 @@ +"""Create agents table.""" + +from typing import Union + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from alembic import op + +revision: str = "b1b2b3b4b5b6" +down_revision: Union[str, None] = "a9a8a7a6a5a4" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.create_table( + "agents", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("workspace_id", UUID(as_uuid=True), sa.ForeignKey("workspaces.id"), nullable=False), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("slug", sa.String(255), nullable=False), + sa.Column("description", sa.Text(), nullable=True), + sa.Column("avatar", sa.String(500), nullable=True), + sa.Column("status", sa.String(20), nullable=False, server_default="draft"), + sa.Column("current_draft_version_id", UUID(as_uuid=True), nullable=True), + sa.Column("active_release_id", UUID(as_uuid=True), nullable=True), + sa.Column("created_by", sa.String(255), sa.ForeignKey("user.id"), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("updated_at", 
sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.UniqueConstraint("workspace_id", "slug", name="uq_agents_workspace_id_slug"), + ) + + +def downgrade() -> None: + op.drop_table("agents") diff --git a/backend/alembic/versions/20260421_000001_c2c3c4c5c6c7_create_agent_versions_table.py b/backend/alembic/versions/20260421_000001_c2c3c4c5c6c7_create_agent_versions_table.py new file mode 100644 index 000000000..0abdff4c1 --- /dev/null +++ b/backend/alembic/versions/20260421_000001_c2c3c4c5c6c7_create_agent_versions_table.py @@ -0,0 +1,44 @@ +"""Create agent_versions table and add FK from agents.current_draft_version_id.""" + +from typing import Union + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from alembic import op + +revision: str = "c2c3c4c5c6c7" +down_revision: Union[str, None] = "b1b2b3b4b5b6" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.create_table( + "agent_versions", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("agent_id", UUID(as_uuid=True), sa.ForeignKey("agents.id", ondelete="CASCADE"), nullable=False), + sa.Column("version_number", sa.Integer(), nullable=False), + sa.Column("status", sa.String(20), nullable=False, server_default="draft"), + sa.Column("source_kind", sa.String(20), nullable=False, server_default="manual"), + sa.Column("definition_kind", sa.String(20), nullable=False), + sa.Column("definition_payload", JSONB(), nullable=False, server_default="{}"), + sa.Column("capability_manifest", JSONB(), nullable=False, server_default="{}"), + sa.Column("changelog", sa.Text(), nullable=True), + sa.Column("created_by", sa.String(255), sa.ForeignKey("user.id"), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.UniqueConstraint("agent_id", "version_number", name="uq_agent_versions_agent_id_version_number"), + ) + + op.create_foreign_key( + "fk_agents_current_draft_version_id", + "agents", + "agent_versions", + ["current_draft_version_id"], + ["id"], + ) + + +def downgrade() -> None: + op.drop_constraint("fk_agents_current_draft_version_id", "agents", type_="foreignkey") + op.drop_table("agent_versions") diff --git a/backend/alembic/versions/20260421_000002_d3d4d5d6d7d8_drop_agent_profiles.py b/backend/alembic/versions/20260421_000002_d3d4d5d6d7d8_drop_agent_profiles.py new file mode 100644 index 000000000..9dcfb4ba5 --- /dev/null +++ b/backend/alembic/versions/20260421_000002_d3d4d5d6d7d8_drop_agent_profiles.py @@ -0,0 +1,28 @@ +"""drop agent_profiles table + +Revision ID: d3d4d5d6d7d8 +Revises: c2c3c4c5c6c7 +Create Date: 2026-04-21 +""" + +from alembic import op + +revision = "d3d4d5d6d7d8" +down_revision = "c2c3c4c5c6c7" +branch_labels = None +depends_on = None + + +def upgrade(): + # Drop the FK index on executions first, then the FK constraint, + # before dropping the referenced table. + # Use IF EXISTS at SQL level — Python try/except won't work because a + # failed statement aborts the entire PostgreSQL transaction. 
+ op.execute("DROP INDEX IF EXISTS executions_agent_profile_idx") + op.execute("ALTER TABLE executions DROP CONSTRAINT IF EXISTS executions_agent_profile_id_fkey") + op.execute("ALTER TABLE executions DROP CONSTRAINT IF EXISTS fk_executions_agent_profile_id_agent_profiles") + op.drop_table("agent_profiles") + + +def downgrade(): + pass # no data migration, no rollback diff --git a/backend/alembic/versions/20260421_000003_e4e5e6e7e8e9_create_agent_releases.py b/backend/alembic/versions/20260421_000003_e4e5e6e7e8e9_create_agent_releases.py new file mode 100644 index 000000000..b2e671505 --- /dev/null +++ b/backend/alembic/versions/20260421_000003_e4e5e6e7e8e9_create_agent_releases.py @@ -0,0 +1,40 @@ +"""create agent_releases table and add FK on agents.active_release_id""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from alembic import op + +revision = "e4e5e6e7e8e9" +down_revision = "d3d4d5d6d7d8" + + +def upgrade(): + op.create_table( + "agent_releases", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("agent_version_id", UUID(as_uuid=True), sa.ForeignKey("agent_versions.id"), nullable=False), + sa.Column("release_number", sa.Integer, nullable=False), + sa.Column("status", sa.String(20), nullable=False, server_default="building"), + sa.Column("runtime_kind", sa.String(20), nullable=False), + sa.Column("builder_kind", sa.String(20), nullable=True), + sa.Column("executable_ref", JSONB, nullable=True), + sa.Column("runtime_binding", JSONB, nullable=False, server_default="{}"), + sa.Column("published_by", sa.String(255), sa.ForeignKey("user.id"), nullable=True), + sa.Column("published_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("retired_at", sa.DateTime(timezone=True), nullable=True), + sa.UniqueConstraint("agent_version_id", "release_number", name="uq_agent_releases_version_number"), + ) + # Add the circular FK from agents.active_release_id -> agent_releases.id + op.create_foreign_key( + "fk_agents_active_release", + "agents", + "agent_releases", + ["active_release_id"], + ["id"], + ) + + +def downgrade(): + op.drop_constraint("fk_agents_active_release", "agents", type_="foreignkey") + op.drop_table("agent_releases") diff --git a/backend/alembic/versions/20260421_000004_f5f6f7f8f9f0_drop_graph_deployment_version.py b/backend/alembic/versions/20260421_000004_f5f6f7f8f9f0_drop_graph_deployment_version.py new file mode 100644 index 000000000..2f0156e45 --- /dev/null +++ b/backend/alembic/versions/20260421_000004_f5f6f7f8f9f0_drop_graph_deployment_version.py @@ -0,0 +1,23 @@ +"""drop graph_deployment_version + +Revision ID: f5f6f7f8f9f0 +Revises: e4e5e6e7e8e9 +Create Date: 2026-04-21 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "f5f6f7f8f9f0" +down_revision = "e4e5e6e7e8e9" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + op.execute("DROP TABLE IF EXISTS graph_deployment_version CASCADE") + + +def downgrade() -> None: + pass diff --git a/backend/alembic/versions/20260421_000005_a1b2c3d4e5f6_create_threads_messages.py b/backend/alembic/versions/20260421_000005_a1b2c3d4e5f6_create_threads_messages.py new file mode 100644 index 000000000..dcbdc4a0b --- /dev/null +++ b/backend/alembic/versions/20260421_000005_a1b2c3d4e5f6_create_threads_messages.py @@ -0,0 +1,38 @@ +"""create threads and messages tables""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from alembic import op + +revision = "aa11bb22cc33" +down_revision = "f5f6f7f8f9f0" + + +def upgrade(): + op.create_table( + "threads", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("agent_id", UUID(as_uuid=True), sa.ForeignKey("agents.id"), nullable=False), + sa.Column("workspace_id", UUID(as_uuid=True), sa.ForeignKey("workspaces.id"), nullable=False), + sa.Column("title", sa.String(500), nullable=True), + sa.Column("status", sa.String(20), nullable=False, server_default="active"), + sa.Column("created_by", sa.String(255), sa.ForeignKey("user.id"), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + op.create_table( + "thread_messages", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("thread_id", UUID(as_uuid=True), sa.ForeignKey("threads.id", ondelete="CASCADE"), nullable=False), + sa.Column("run_id", UUID(as_uuid=True), nullable=True), # FK added in Phase 4 + sa.Column("execution_id", UUID(as_uuid=True), nullable=True), # FK added in Phase 4 + sa.Column("role", sa.String(20), nullable=False), + sa.Column("content", JSONB, nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + + +def downgrade(): + op.drop_table("thread_messages") + op.drop_table("threads") diff --git a/backend/alembic/versions/20260421_000006_b2c3d4e5f6a7_drop_conversations.py b/backend/alembic/versions/20260421_000006_b2c3d4e5f6a7_drop_conversations.py new file mode 100644 index 000000000..f9a956e5a --- /dev/null +++ b/backend/alembic/versions/20260421_000006_b2c3d4e5f6a7_drop_conversations.py @@ -0,0 +1,15 @@ +"""drop conversations table""" + +from alembic import op + +revision = "bb22cc33dd44" +down_revision = "aa11bb22cc33" + + +def upgrade(): + op.execute("ALTER TABLE messages DROP CONSTRAINT IF EXISTS fk_messages_thread_id_conversations") + op.drop_table("conversations") + + +def downgrade(): + pass diff --git a/backend/alembic/versions/20260421_000007_c3d4e5f6a7b8_drop_legacy_execution_tables.py b/backend/alembic/versions/20260421_000007_c3d4e5f6a7b8_drop_legacy_execution_tables.py new file mode 100644 index 000000000..922584217 --- /dev/null +++ b/backend/alembic/versions/20260421_000007_c3d4e5f6a7b8_drop_legacy_execution_tables.py @@ -0,0 +1,31 @@ +"""drop legacy execution tables""" + +from alembic import op + +revision = "cc33dd44ee55" +down_revision = "bb22cc33dd44" + + +def upgrade(): + # Use SQL-level IF EXISTS / CASCADE — Python try/except won't work + # because a failed statement aborts the entire PostgreSQL transaction. + + # 1. 
Drop snapshot tables first (they depend on main tables) + op.execute("DROP TABLE IF EXISTS execution_snapshots") + op.execute("DROP TABLE IF EXISTS agent_run_snapshots") + + # 2. Drop event tables + op.execute("DROP TABLE IF EXISTS execution_events CASCADE") + op.execute("DROP TABLE IF EXISTS agent_run_events CASCADE") + + # 3. Remove FK from missions table + op.execute("ALTER TABLE missions DROP CONSTRAINT IF EXISTS fk_missions_current_execution") + op.execute("ALTER TABLE missions DROP COLUMN IF EXISTS current_execution_id") + + # 4. Drop main tables (CASCADE handles any remaining dependent FKs) + op.execute("DROP TABLE IF EXISTS executions CASCADE") + op.execute("DROP TABLE IF EXISTS agent_runs CASCADE") + + +def downgrade(): + pass diff --git a/backend/alembic/versions/20260421_000008_d4e5f6a7b8c9_create_new_execution_chain.py b/backend/alembic/versions/20260421_000008_d4e5f6a7b8c9_create_new_execution_chain.py new file mode 100644 index 000000000..1f87d3c01 --- /dev/null +++ b/backend/alembic/versions/20260421_000008_d4e5f6a7b8c9_create_new_execution_chain.py @@ -0,0 +1,74 @@ +"""create new execution chain tables""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from alembic import op + +revision = "dd44ee55ff66" +down_revision = "cc33dd44ee55" + + +def upgrade(): + op.create_table( + "agent_runs", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("release_id", UUID(as_uuid=True), sa.ForeignKey("agent_releases.id"), nullable=False), + sa.Column("workspace_id", UUID(as_uuid=True), sa.ForeignKey("workspaces.id"), nullable=False), + sa.Column("thread_id", UUID(as_uuid=True), sa.ForeignKey("threads.id"), nullable=True), + sa.Column("mission_id", UUID(as_uuid=True), sa.ForeignKey("missions.id"), nullable=True), + sa.Column("trigger_source", sa.String(20), nullable=False), + sa.Column("goal", sa.Text, nullable=True), + sa.Column("input_payload", JSONB, nullable=True), + sa.Column("status", sa.String(20), nullable=False, server_default="queued"), + sa.Column("current_execution_id", UUID(as_uuid=True), nullable=True), # FK added after executions + sa.Column("result_summary", sa.Text, nullable=True), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("ended_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("created_by", sa.String(255), sa.ForeignKey("user.id"), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + + op.create_table( + "executions", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("run_id", UUID(as_uuid=True), sa.ForeignKey("agent_runs.id"), nullable=False), + sa.Column("parent_execution_id", UUID(as_uuid=True), sa.ForeignKey("executions.id"), nullable=True), + sa.Column("attempt_index", sa.Integer, nullable=False, server_default="1"), + sa.Column("executor_kind", sa.String(20), nullable=False), + sa.Column("runtime_session_ref", sa.String(500), nullable=True), + sa.Column("status", sa.String(20), nullable=False, server_default="pending"), + sa.Column("error", JSONB, nullable=True), + sa.Column("metrics", JSONB, nullable=True), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("ended_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.UniqueConstraint("run_id", 
"attempt_index", name="uq_executions_run_attempt"), + ) + + # Circular FK: agent_runs.current_execution_id -> executions.id + op.create_foreign_key( + "fk_agent_runs_current_execution", + "agent_runs", + "executions", + ["current_execution_id"], + ["id"], + ) + + op.create_table( + "execution_events", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("execution_id", UUID(as_uuid=True), sa.ForeignKey("executions.id"), nullable=False), + sa.Column("sequence_no", sa.Integer, nullable=False), + sa.Column("event_type", sa.String(50), nullable=False), + sa.Column("payload", JSONB, nullable=False, server_default="{}"), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.UniqueConstraint("execution_id", "sequence_no", name="uq_execution_events_seq"), + ) + + +def downgrade(): + op.drop_table("execution_events") + op.drop_constraint("fk_agent_runs_current_execution", "agent_runs", type_="foreignkey") + op.drop_table("executions") + op.drop_table("agent_runs") diff --git a/backend/alembic/versions/20260421_000009_e5f6a7b8c9d0_add_message_fks.py b/backend/alembic/versions/20260421_000009_e5f6a7b8c9d0_add_message_fks.py new file mode 100644 index 000000000..814cfcfda --- /dev/null +++ b/backend/alembic/versions/20260421_000009_e5f6a7b8c9d0_add_message_fks.py @@ -0,0 +1,16 @@ +"""add run_id and execution_id FKs on thread_messages""" + +from alembic import op + +revision = "ee55ff66aa77" +down_revision = "dd44ee55ff66" + + +def upgrade(): + op.create_foreign_key("fk_messages_run", "thread_messages", "agent_runs", ["run_id"], ["id"]) + op.create_foreign_key("fk_messages_execution", "thread_messages", "executions", ["execution_id"], ["id"]) + + +def downgrade(): + op.drop_constraint("fk_messages_execution", "thread_messages", type_="foreignkey") + op.drop_constraint("fk_messages_run", "thread_messages", type_="foreignkey") diff --git a/backend/alembic/versions/20260421_000010_f6a7b8c9d0e1_create_artifacts.py b/backend/alembic/versions/20260421_000010_f6a7b8c9d0e1_create_artifacts.py new file mode 100644 index 000000000..1feb81dfc --- /dev/null +++ b/backend/alembic/versions/20260421_000010_f6a7b8c9d0e1_create_artifacts.py @@ -0,0 +1,25 @@ +"""create artifacts table""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from alembic import op + +revision = "ff66aa77bb88" +down_revision = "ee55ff66aa77" + + +def upgrade(): + op.create_table( + "artifacts", + sa.Column("id", UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), primary_key=True), + sa.Column("execution_id", UUID(as_uuid=True), sa.ForeignKey("executions.id"), nullable=False), + sa.Column("kind", sa.String(50), nullable=False), + sa.Column("uri", sa.Text, nullable=False), + sa.Column("metadata", JSONB, nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + ) + + +def downgrade(): + op.drop_table("artifacts") diff --git a/backend/alembic/versions/20260421_000011_z1a2b3c4d5e6_drop_legacy_graph_tables.py b/backend/alembic/versions/20260421_000011_z1a2b3c4d5e6_drop_legacy_graph_tables.py new file mode 100644 index 000000000..11adf4b45 --- /dev/null +++ b/backend/alembic/versions/20260421_000011_z1a2b3c4d5e6_drop_legacy_graph_tables.py @@ -0,0 +1,15 @@ +"""drop legacy graph tables""" + +from alembic import op + +revision = "z1a2b3c4d5e6" +down_revision = "ff66aa77bb88" + + +def upgrade(): + for table in 
["graph_node_secrets", "graph_executions", "graph_edges", "graph_nodes", "graphs"]: + op.execute(f"DROP TABLE IF EXISTS {table} CASCADE") + + +def downgrade(): + pass diff --git a/backend/alembic/versions/20260422_000000_g8h9i0j1k2l3_rename_missions_to_tasks.py b/backend/alembic/versions/20260422_000000_g8h9i0j1k2l3_rename_missions_to_tasks.py new file mode 100644 index 000000000..0ac830cb4 --- /dev/null +++ b/backend/alembic/versions/20260422_000000_g8h9i0j1k2l3_rename_missions_to_tasks.py @@ -0,0 +1,59 @@ +"""rename missions to tasks""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from alembic import op + +revision = "g8h9i0j1k2l3" +down_revision = "z1a2b3c4d5e6" + + +def upgrade(): + # Rename table + op.rename_table("missions", "tasks") + + # Rename columns + op.alter_column("tasks", "assignee_id", new_column_name="agent_id") + op.alter_column("tasks", "objective", new_column_name="goal") + op.alter_column("tasks", "parent_mission_id", new_column_name="parent_task_id") + + # Drop assignee_type column (tasks always belong to agents) + op.execute("ALTER TABLE tasks DROP COLUMN IF EXISTS assignee_type") + + # Drop current_execution_id (replaced by latest_run_id) + # May already be dropped by a prior migration (_000007). + op.execute("ALTER TABLE tasks DROP COLUMN IF EXISTS current_execution_id") + + # Add latest_run_id column + op.add_column("tasks", sa.Column("latest_run_id", UUID(as_uuid=True), nullable=True)) + op.create_foreign_key("fk_tasks_latest_run", "tasks", "agent_runs", ["latest_run_id"], ["id"]) + + # Add FK from agent_id to agents table + # Clean up orphaned references first (old assignee_id values from agent_profiles) + op.execute(""" + UPDATE tasks SET agent_id = NULL + WHERE agent_id IS NOT NULL + AND agent_id NOT IN (SELECT id FROM agents) + """) + op.create_foreign_key("fk_tasks_agent", "tasks", "agents", ["agent_id"], ["id"]) + + # Add FK from parent_task_id to tasks table + op.create_foreign_key("fk_tasks_parent", "tasks", "tasks", ["parent_task_id"], ["id"], ondelete="SET NULL") + + # Update agent_runs table: rename mission_id FK reference + op.alter_column("agent_runs", "mission_id", new_column_name="task_id") + + # Update indexes — use SQL-level IF EXISTS to avoid transaction aborts + op.execute("DROP INDEX IF EXISTS missions_workspace_status_idx") + op.create_index("tasks_workspace_status_idx", "tasks", ["workspace_id", "status"]) + + op.execute("DROP INDEX IF EXISTS missions_assignee_idx") + op.create_index("tasks_agent_idx", "tasks", ["agent_id"]) + + op.execute("DROP INDEX IF EXISTS missions_creator_idx") + op.create_index("tasks_creator_idx", "tasks", ["creator_id", "created_at"]) + + +def downgrade(): + pass # no rollback in greenfield diff --git a/backend/alembic/versions/20260422_000001_h1i2j3k4l5m6_rename_comments_to_activities.py b/backend/alembic/versions/20260422_000001_h1i2j3k4l5m6_rename_comments_to_activities.py new file mode 100644 index 000000000..86325412f --- /dev/null +++ b/backend/alembic/versions/20260422_000001_h1i2j3k4l5m6_rename_comments_to_activities.py @@ -0,0 +1,121 @@ +"""Rename mission_comments to task_activities and update enums. + +Revision ID: h1i2j3k4l5m6 +Revises: g8h9i0j1k2l3 +Create Date: 2026-04-22 +""" + +from alembic import op + +revision = "h1i2j3k4l5m6" +down_revision = "g8h9i0j1k2l3" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # 1. Rename table + op.rename_table("mission_comments", "task_activities") + + # 2. 
Rename columns + op.alter_column("task_activities", "parent_comment_id", new_column_name="parent_activity_id") + op.alter_column("task_activities", "mission_id", new_column_name="task_id") + + # 3. Rename enum types + op.execute("ALTER TYPE commentauthortype RENAME TO activityauthortype") + op.execute("ALTER TYPE commenttype RENAME TO activitytype") + + # 4. Rename indexes + op.execute("ALTER INDEX IF EXISTS mission_comments_mission_created_idx RENAME TO task_activities_task_created_idx") + op.execute("ALTER INDEX IF EXISTS mission_comments_workspace_idx RENAME TO task_activities_workspace_idx") + op.execute("ALTER INDEX IF EXISTS mission_comments_author_idx RENAME TO task_activities_author_idx") + op.execute("ALTER INDEX IF EXISTS mission_comments_parent_idx RENAME TO task_activities_parent_idx") + + # 5. Rename foreign key constraints + op.execute(""" + DO $$ + DECLARE + r RECORD; + BEGIN + FOR r IN ( + SELECT conname FROM pg_constraint + WHERE conrelid = 'task_activities'::regclass + AND conname LIKE 'mission_comments%' + ) LOOP + EXECUTE 'ALTER TABLE task_activities RENAME CONSTRAINT ' + || quote_ident(r.conname) + || ' TO ' + || quote_ident(REPLACE(r.conname, 'mission_comments', 'task_activities')); + END LOOP; + END $$; + """) + + # 6. Rename self-referencing FK column name in constraint + op.execute(""" + DO $$ + DECLARE + r RECORD; + BEGIN + FOR r IN ( + SELECT conname FROM pg_constraint + WHERE conrelid = 'task_activities'::regclass + AND conname LIKE '%parent_comment%' + ) LOOP + EXECUTE 'ALTER TABLE task_activities RENAME CONSTRAINT ' + || quote_ident(r.conname) + || ' TO ' + || quote_ident(REPLACE(r.conname, 'parent_comment', 'parent_activity')); + END LOOP; + END $$; + """) + + +def downgrade() -> None: + # Reverse all renames + op.execute(""" + DO $$ + DECLARE + r RECORD; + BEGIN + FOR r IN ( + SELECT conname FROM pg_constraint + WHERE conrelid = 'task_activities'::regclass + AND conname LIKE '%parent_activity%' + ) LOOP + EXECUTE 'ALTER TABLE task_activities RENAME CONSTRAINT ' + || quote_ident(r.conname) + || ' TO ' + || quote_ident(REPLACE(r.conname, 'parent_activity', 'parent_comment')); + END LOOP; + END $$; + """) + + op.execute(""" + DO $$ + DECLARE + r RECORD; + BEGIN + FOR r IN ( + SELECT conname FROM pg_constraint + WHERE conrelid = 'task_activities'::regclass + AND conname LIKE 'task_activities%' + ) LOOP + EXECUTE 'ALTER TABLE task_activities RENAME CONSTRAINT ' + || quote_ident(r.conname) + || ' TO ' + || quote_ident(REPLACE(r.conname, 'task_activities', 'mission_comments')); + END LOOP; + END $$; + """) + + op.execute("ALTER INDEX IF EXISTS task_activities_parent_idx RENAME TO mission_comments_parent_idx") + op.execute("ALTER INDEX IF EXISTS task_activities_author_idx RENAME TO mission_comments_author_idx") + op.execute("ALTER INDEX IF EXISTS task_activities_workspace_idx RENAME TO mission_comments_workspace_idx") + op.execute("ALTER INDEX IF EXISTS task_activities_task_created_idx RENAME TO mission_comments_mission_created_idx") + + op.execute("ALTER TYPE activitytype RENAME TO commenttype") + op.execute("ALTER TYPE activityauthortype RENAME TO commentauthortype") + + op.alter_column("task_activities", "task_id", new_column_name="mission_id") + op.alter_column("task_activities", "parent_activity_id", new_column_name="parent_comment_id") + op.rename_table("task_activities", "mission_comments") diff --git a/backend/alembic/versions/20260422_101551_333df7a8ab82_rename_mission_enums_to_task_enums.py 
b/backend/alembic/versions/20260422_101551_333df7a8ab82_rename_mission_enums_to_task_enums.py new file mode 100644 index 000000000..6f05f2c4d --- /dev/null +++ b/backend/alembic/versions/20260422_101551_333df7a8ab82_rename_mission_enums_to_task_enums.py @@ -0,0 +1,45 @@ +"""rename mission enums to task enums + +Revision ID: 333df7a8ab82 +Revises: h1i2j3k4l5m6 +Create Date: 2026-04-22 10:15:51.632056+00:00 + +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "333df7a8ab82" +down_revision: Union[str, None] = "h1i2j3k4l5m6" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute(""" + DO $$ + BEGIN + IF EXISTS (SELECT 1 FROM pg_type WHERE typname = 'missionstatus') THEN + ALTER TYPE missionstatus RENAME TO taskstatus; + END IF; + IF EXISTS (SELECT 1 FROM pg_type WHERE typname = 'missionpriority') THEN + ALTER TYPE missionpriority RENAME TO taskpriority; + END IF; + END $$; + """) + + +def downgrade() -> None: + op.execute(""" + DO $$ + BEGIN + IF EXISTS (SELECT 1 FROM pg_type WHERE typname = 'taskstatus') THEN + ALTER TYPE taskstatus RENAME TO missionstatus; + END IF; + IF EXISTS (SELECT 1 FROM pg_type WHERE typname = 'taskpriority') THEN + ALTER TYPE taskpriority RENAME TO missionpriority; + END IF; + END $$; + """) diff --git a/backend/alembic/versions/20260422_103600_444df7a8ab83_add_status_enums_and_fix_fk_constraints.py b/backend/alembic/versions/20260422_103600_444df7a8ab83_add_status_enums_and_fix_fk_constraints.py new file mode 100644 index 000000000..ad4173017 --- /dev/null +++ b/backend/alembic/versions/20260422_103600_444df7a8ab83_add_status_enums_and_fix_fk_constraints.py @@ -0,0 +1,201 @@ +"""add status enums and fix FK constraints + +Revision ID: 556fa9cacd05 +Revises: 444df7a8ab83 +Create Date: 2026-04-22 10:36:00.000000+00:00 + +Changes: +- Create DB enum types: agent_status, agent_version_status, agent_release_status, + agent_run_status, execution_status +- Migrate status columns from varchar to enum types +- Fix agents.created_by ondelete: SET NULL → CASCADE (NOT NULL + SET NULL = crash) +- Fix agent_versions.created_by ondelete: SET NULL → CASCADE (same issue) +- Fix thread_messages.run_id / execution_id FKs: add ondelete=SET NULL +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "556fa9cacd05" +down_revision: Union[str, None] = "444df7a8ab83" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +# --------------------------------------------------------------------------- +# helpers +# --------------------------------------------------------------------------- + + +def _create_enum(name: str, *values: str) -> None: + """CREATE TYPE <name> AS ENUM (...). Not idempotent: CREATE TYPE has no IF NOT EXISTS clause.""" + quoted = ", ".join(f"'{v}'" for v in values) + op.execute(f"CREATE TYPE {name} AS ENUM ({quoted})") + + +def _drop_enum(name: str) -> None: + op.execute(f"DROP TYPE IF EXISTS {name}") + + +# --------------------------------------------------------------------------- +# upgrade +# --------------------------------------------------------------------------- + + +def upgrade() -> None: + # ------------------------------------------------------------------ + # 1.
Create enum types + # ------------------------------------------------------------------ + _create_enum("agent_status", "draft", "active", "archived") + _create_enum("agent_version_status", "draft", "frozen") + _create_enum("agent_release_status", "building", "ready", "failed", "retired") + _create_enum("agent_run_status", "queued", "running", "succeeded", "failed", "cancelled") + _create_enum("execution_status", "pending", "running", "succeeded", "failed", "cancelled") + + # ------------------------------------------------------------------ + # 2. Migrate status columns varchar → enum (using USING cast) + # ------------------------------------------------------------------ + # agents + op.execute("ALTER TABLE agents ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE agents ALTER COLUMN status TYPE agent_status USING status::agent_status") + op.execute("ALTER TABLE agents ALTER COLUMN status SET DEFAULT 'draft'::agent_status") + + # agent_versions + op.execute("ALTER TABLE agent_versions ALTER COLUMN status DROP DEFAULT") + op.execute( + "ALTER TABLE agent_versions ALTER COLUMN status TYPE agent_version_status USING status::agent_version_status" + ) + op.execute("ALTER TABLE agent_versions ALTER COLUMN status SET DEFAULT 'draft'::agent_version_status") + + # agent_releases + op.execute("ALTER TABLE agent_releases ALTER COLUMN status DROP DEFAULT") + op.execute( + "ALTER TABLE agent_releases ALTER COLUMN status TYPE agent_release_status USING status::agent_release_status" + ) + op.execute("ALTER TABLE agent_releases ALTER COLUMN status SET DEFAULT 'building'::agent_release_status") + + # agent_runs + op.execute("ALTER TABLE agent_runs ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE agent_runs ALTER COLUMN status TYPE agent_run_status USING status::agent_run_status") + op.execute("ALTER TABLE agent_runs ALTER COLUMN status SET DEFAULT 'queued'::agent_run_status") + + # executions + op.execute("ALTER TABLE executions ALTER COLUMN status DROP DEFAULT") + op.execute( + "ALTER TABLE executions " + "ALTER COLUMN status TYPE execution_status " + "USING (CASE " + " WHEN status IN ('queued', 'dispatched') THEN 'pending' " + " WHEN status = 'completed' THEN 'succeeded' " + " ELSE status " + "END)::execution_status" + ) + op.execute("ALTER TABLE executions ALTER COLUMN status SET DEFAULT 'pending'::execution_status") + + # 3. Fix agents.created_by: drop old FK (no ondelete) → re-add CASCADE + # The actual name in DB is "fk_agents_created_by_user". + # ------------------------------------------------------------------ + op.drop_constraint("fk_agents_created_by_user", "agents", type_="foreignkey") + op.create_foreign_key( + "fk_agents_created_by", + "agents", + "user", + ["created_by"], + ["id"], + ondelete="CASCADE", + ) + + # ------------------------------------------------------------------ + # 4. Fix agent_versions.created_by (same issue, actual: "fk_agent_versions_created_by_user") + # ------------------------------------------------------------------ + op.drop_constraint("fk_agent_versions_created_by_user", "agent_versions", type_="foreignkey") + op.create_foreign_key( + "fk_agent_versions_created_by", + "agent_versions", + "user", + ["created_by"], + ["id"], + ondelete="CASCADE", + ) + + # ------------------------------------------------------------------ + # 5. Fix thread_messages.run_id / execution_id: drop old FKs (no + # ondelete), re-add with ondelete=SET NULL. 
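+    #    (ON DELETE SET NULL preserves chat history: deleting a run or execution + #    clears the message's reference column instead of removing the message row.)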
+ # Old names from migration ee55ff66aa77: fk_messages_run / fk_messages_execution + # ------------------------------------------------------------------ + op.drop_constraint("fk_messages_run", "thread_messages", type_="foreignkey") + op.create_foreign_key( + "fk_thread_messages_run_id", + "thread_messages", + "agent_runs", + ["run_id"], + ["id"], + ondelete="SET NULL", + ) + + op.drop_constraint("fk_messages_execution", "thread_messages", type_="foreignkey") + op.create_foreign_key( + "fk_thread_messages_execution_id", + "thread_messages", + "executions", + ["execution_id"], + ["id"], + ondelete="SET NULL", + ) + + +# --------------------------------------------------------------------------- +# downgrade +# --------------------------------------------------------------------------- + + +def downgrade() -> None: + # Restore thread_messages FKs without ondelete + op.drop_constraint("fk_thread_messages_execution_id", "thread_messages", type_="foreignkey") + op.create_foreign_key("fk_messages_execution", "thread_messages", "executions", ["execution_id"], ["id"]) + + op.drop_constraint("fk_thread_messages_run_id", "thread_messages", type_="foreignkey") + op.create_foreign_key("fk_messages_run", "thread_messages", "agent_runs", ["run_id"], ["id"]) + + # Restore agent_versions.created_by FK without ondelete + op.drop_constraint("fk_agent_versions_created_by", "agent_versions", type_="foreignkey") + op.create_foreign_key(None, "agent_versions", "user", ["created_by"], ["id"]) + + # Restore agents.created_by FK without ondelete + op.drop_constraint("fk_agents_created_by", "agents", type_="foreignkey") + op.create_foreign_key(None, "agents", "user", ["created_by"], ["id"]) + + # Revert status columns enum → varchar + # executions + op.execute("ALTER TABLE executions ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE executions ALTER COLUMN status TYPE varchar(20) USING status::text") + op.execute("ALTER TABLE executions ALTER COLUMN status SET DEFAULT 'queued'") + + # agent_runs + op.execute("ALTER TABLE agent_runs ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE agent_runs ALTER COLUMN status TYPE varchar(20) USING status::text") + op.execute("ALTER TABLE agent_runs ALTER COLUMN status SET DEFAULT 'queued'") + + # agent_releases + op.execute("ALTER TABLE agent_releases ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE agent_releases ALTER COLUMN status TYPE varchar(20) USING status::text") + op.execute("ALTER TABLE agent_releases ALTER COLUMN status SET DEFAULT 'building'") + + # agent_versions + op.execute("ALTER TABLE agent_versions ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE agent_versions ALTER COLUMN status TYPE varchar(20) USING status::text") + op.execute("ALTER TABLE agent_versions ALTER COLUMN status SET DEFAULT 'draft'") + + # agents + op.execute("ALTER TABLE agents ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TABLE agents ALTER COLUMN status TYPE varchar(20) USING status::text") + op.execute("ALTER TABLE agents ALTER COLUMN status SET DEFAULT 'draft'") + + # Drop enum types + _drop_enum("execution_status") + _drop_enum("agent_run_status") + _drop_enum("agent_release_status") + _drop_enum("agent_version_status") + _drop_enum("agent_status") diff --git a/backend/alembic/versions/20260422_110000_444df7a8ab83_add_todo_and_in_review_to_taskstatus_enum.py b/backend/alembic/versions/20260422_110000_444df7a8ab83_add_todo_and_in_review_to_taskstatus_enum.py new file mode 100644 index 000000000..0e4ee3713 --- /dev/null +++ 
b/backend/alembic/versions/20260422_110000_444df7a8ab83_add_todo_and_in_review_to_taskstatus_enum.py @@ -0,0 +1,26 @@ +"""add todo and in_review to taskstatus enum + +Revision ID: 444df7a8ab83 +Revises: 333df7a8ab82 +Create Date: 2026-04-22 11:00:00.000000+00:00 + +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "444df7a8ab83" +down_revision: Union[str, None] = "333df7a8ab82" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute("ALTER TYPE taskstatus ADD VALUE IF NOT EXISTS 'todo'") + op.execute("ALTER TYPE taskstatus ADD VALUE IF NOT EXISTS 'in_review'") + + +def downgrade() -> None: + pass # PostgreSQL cannot remove enum values diff --git a/backend/alembic/versions/20260422_120000_667ab8bcde06_unify_status_vocabulary_queued_to_pending_and_building_to_pending.py b/backend/alembic/versions/20260422_120000_667ab8bcde06_unify_status_vocabulary_queued_to_pending_and_building_to_pending.py new file mode 100644 index 000000000..10efd4266 --- /dev/null +++ b/backend/alembic/versions/20260422_120000_667ab8bcde06_unify_status_vocabulary_queued_to_pending_and_building_to_pending.py @@ -0,0 +1,32 @@ +"""unify status vocabulary queued to pending and building to pending + +Revision ID: 667ab8bcde06 +Revises: 556fa9cacd05 +Create Date: 2026-04-22 12:00:00.000000+00:00 + +Changes: +- agent_run_status enum: rename 'queued' -> 'pending' +- agent_release_status enum: rename 'building' -> 'pending' + +NOTE: ALTER TYPE ... RENAME VALUE requires PostgreSQL 10+. +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "667ab8bcde06" +down_revision: Union[str, None] = "556fa9cacd05" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute("ALTER TYPE agent_run_status RENAME VALUE 'queued' TO 'pending'") + op.execute("ALTER TYPE agent_release_status RENAME VALUE 'building' TO 'pending'") + + +def downgrade() -> None: + op.execute("ALTER TYPE agent_run_status RENAME VALUE 'pending' TO 'queued'") + op.execute("ALTER TYPE agent_release_status RENAME VALUE 'pending' TO 'building'") diff --git a/backend/alembic/versions/20260422_130000_778bc9cdef07_add_dispatched_and_approval_wait_to_execution_status.py b/backend/alembic/versions/20260422_130000_778bc9cdef07_add_dispatched_and_approval_wait_to_execution_status.py new file mode 100644 index 000000000..32704b160 --- /dev/null +++ b/backend/alembic/versions/20260422_130000_778bc9cdef07_add_dispatched_and_approval_wait_to_execution_status.py @@ -0,0 +1,33 @@ +"""add dispatched and approval_wait to execution_status enum + +Revision ID: 778bc9cdef07 +Revises: 667ab8bcde06 +Create Date: 2026-04-22 13:00:00.000000+00:00 + +Changes: +- execution_status enum: add 'dispatched' and 'approval_wait' values + These states are actively written by execution_runner.py / definitions.py + but were missing from the DB enum, causing runtime errors on first write. + +NOTE: ALTER TYPE ... ADD VALUE requires PostgreSQL 9.1+. + Enum values cannot be removed in PostgreSQL (downgrade is a no-op). +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. 
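+# (Revision chain: 667ab8bcde06 -> 778bc9cdef07 -> 889cd0def08.)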
+revision: str = "778bc9cdef07" +down_revision: Union[str, None] = "667ab8bcde06" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute("ALTER TYPE execution_status ADD VALUE IF NOT EXISTS 'dispatched'") + op.execute("ALTER TYPE execution_status ADD VALUE IF NOT EXISTS 'approval_wait'") + + +def downgrade() -> None: + pass # Cannot remove enum values in PostgreSQL diff --git a/backend/alembic/versions/20260424_000000_889cd0def08_remove_pending_from_agent_release_status.py b/backend/alembic/versions/20260424_000000_889cd0def08_remove_pending_from_agent_release_status.py new file mode 100644 index 000000000..8ebb24b33 --- /dev/null +++ b/backend/alembic/versions/20260424_000000_889cd0def08_remove_pending_from_agent_release_status.py @@ -0,0 +1,58 @@ +"""remove pending from agent_release_status enum + +Revision ID: 889cd0def08 +Revises: 778bc9cdef07 +Create Date: 2026-04-24 00:00:00.000000+00:00 + +Changes: +- agent_release_status enum: remove 'pending' (never used in practice) +- Set server_default to 'ready' (matches ORM default and service behavior) + +PostgreSQL cannot DROP a value from an enum, so we recreate the type. +""" + +from typing import Sequence, Union + +from alembic import op + +revision: str = "889cd0def08" +down_revision: Union[str, None] = "778bc9cdef07" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # 1. Drop the default so the column has no dependency on the enum during swap + op.execute("ALTER TABLE agent_releases ALTER COLUMN status DROP DEFAULT") + + # 2. Rename old enum out of the way + op.execute("ALTER TYPE agent_release_status RENAME TO agent_release_status_old") + + # 3. Create new enum without 'pending' + op.execute("CREATE TYPE agent_release_status AS ENUM ('ready', 'failed', 'retired')") + + # 4. Swap column type (no rows should have 'pending'; fail loudly if they do) + op.execute( + "ALTER TABLE agent_releases " + "ALTER COLUMN status TYPE agent_release_status " + "USING status::text::agent_release_status" + ) + + # 5. Set correct default + op.execute("ALTER TABLE agent_releases ALTER COLUMN status SET DEFAULT 'ready'::agent_release_status") + + # 6. 
Drop old enum + op.execute("DROP TYPE agent_release_status_old") + + +def downgrade() -> None: + op.execute("ALTER TABLE agent_releases ALTER COLUMN status DROP DEFAULT") + op.execute("ALTER TYPE agent_release_status RENAME TO agent_release_status_old") + op.execute("CREATE TYPE agent_release_status AS ENUM ('pending', 'ready', 'failed', 'retired')") + op.execute( + "ALTER TABLE agent_releases " + "ALTER COLUMN status TYPE agent_release_status " + "USING status::text::agent_release_status" + ) + op.execute("ALTER TABLE agent_releases ALTER COLUMN status SET DEFAULT 'pending'::agent_release_status") + op.execute("DROP TYPE agent_release_status_old") diff --git a/backend/alembic/versions/20260425_000000_990de1ef09_add_draft_agent_runs.py b/backend/alembic/versions/20260425_000000_990de1ef09_add_draft_agent_runs.py new file mode 100644 index 000000000..284637e43 --- /dev/null +++ b/backend/alembic/versions/20260425_000000_990de1ef09_add_draft_agent_runs.py @@ -0,0 +1,49 @@ +"""add draft agent runs + +Revision ID: 990de1ef09 +Revises: 889cd0def08 +Create Date: 2026-04-25 00:00:00.000000+00:00 +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from alembic import op + +revision: str = "990de1ef09" +down_revision: Union[str, None] = "889cd0def08" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.add_column("agent_runs", sa.Column("agent_version_id", UUID(as_uuid=True), nullable=True)) + op.create_foreign_key( + "fk_agent_runs_agent_version_id", + "agent_runs", + "agent_versions", + ["agent_version_id"], + ["id"], + ) + op.alter_column("agent_runs", "release_id", existing_type=UUID(as_uuid=True), nullable=True) + op.create_check_constraint( + "ck_agent_runs_release_or_version", + "agent_runs", + "(release_id IS NOT NULL) <> (agent_version_id IS NOT NULL)", + ) + op.create_index( + "ix_agent_runs_agent_version_id", + "agent_runs", + ["agent_version_id"], + ) + + +def downgrade() -> None: + op.drop_constraint("ck_agent_runs_release_or_version", "agent_runs", type_="check") + op.drop_index("ix_agent_runs_agent_version_id", table_name="agent_runs") + op.execute("DELETE FROM agent_runs WHERE release_id IS NULL") + op.alter_column("agent_runs", "release_id", existing_type=UUID(as_uuid=True), nullable=False) + op.drop_constraint("fk_agent_runs_agent_version_id", "agent_runs", type_="foreignkey") + op.drop_column("agent_runs", "agent_version_id") diff --git a/backend/alembic/versions/20260426_000000_aab1c2d3e4f5_fix_active_agents_without_release.py b/backend/alembic/versions/20260426_000000_aab1c2d3e4f5_fix_active_agents_without_release.py new file mode 100644 index 000000000..62fe9852e --- /dev/null +++ b/backend/alembic/versions/20260426_000000_aab1c2d3e4f5_fix_active_agents_without_release.py @@ -0,0 +1,28 @@ +"""fix agents with status='active' but no active release + +Revision ID: aab1c2d3e4f5 +Revises: 990de1ef09 +Create Date: 2026-04-26 00:00:00.000000+00:00 + +Changes: +- Data-only migration: revert agents to 'draft' when status is 'active' + but active_release_id is NULL (out-of-sync state from retire_release + not syncing status prior to this fix). 
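+- Idempotent: the UPDATE matches only rows still in the inconsistent state, +  so re-running this migration is a no-op.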
+""" + +from typing import Sequence, Union + +from alembic import op + +revision: str = "aab1c2d3e4f5" +down_revision: Union[str, None] = "990de1ef09" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute("UPDATE agents SET status = 'draft' WHERE status = 'active' AND active_release_id IS NULL") + + +def downgrade() -> None: + pass diff --git a/backend/alembic/versions/20260427_000000_bb1cc2dd3ee4_add_encrypted_custom_env_to_agents.py b/backend/alembic/versions/20260427_000000_bb1cc2dd3ee4_add_encrypted_custom_env_to_agents.py new file mode 100644 index 000000000..7d7db2695 --- /dev/null +++ b/backend/alembic/versions/20260427_000000_bb1cc2dd3ee4_add_encrypted_custom_env_to_agents.py @@ -0,0 +1,29 @@ +"""add encrypted_custom_env to agents + +Revision ID: bb1cc2dd3ee4 +Revises: aab1c2d3e4f5 +Create Date: 2026-04-27 00:00:00.000000+00:00 + +Changes: +- Add encrypted_custom_env (Text, nullable) to agents table for + per-agent API key overrides, replacing the dropped agent_profiles.custom_env. +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +revision: str = "bb1cc2dd3ee4" +down_revision: Union[str, None] = "aab1c2d3e4f5" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.add_column("agents", sa.Column("encrypted_custom_env", sa.Text(), nullable=True)) + + +def downgrade() -> None: + op.drop_column("agents", "encrypted_custom_env") diff --git a/backend/alembic/versions/20260427_140000_cc3dd4ee5ff6_unify_execution_error_payload.py b/backend/alembic/versions/20260427_140000_cc3dd4ee5ff6_unify_execution_error_payload.py new file mode 100644 index 000000000..50ff64c7d --- /dev/null +++ b/backend/alembic/versions/20260427_140000_cc3dd4ee5ff6_unify_execution_error_payload.py @@ -0,0 +1,40 @@ +"""unify execution error payload""" + +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from alembic import op + +revision = "cc3dd4ee5ff6" +down_revision = "bb1cc2dd3ee4" + + +def upgrade(): + op.add_column("executions", sa.Column("error", postgresql.JSONB(astext_type=sa.Text()), nullable=True)) + op.execute( + """ + UPDATE executions + SET error = jsonb_build_object( + 'code', COALESCE(error_code, 'EXECUTION_FAILED'), + 'message', COALESCE(error_message, 'Execution failed'), + 'data', NULL + ) + WHERE error_code IS NOT NULL OR error_message IS NOT NULL + """ + ) + op.drop_column("executions", "error_code") + op.drop_column("executions", "error_message") + + +def downgrade(): + op.add_column("executions", sa.Column("error_message", sa.Text(), nullable=True)) + op.add_column("executions", sa.Column("error_code", sa.String(length=100), nullable=True)) + op.execute( + """ + UPDATE executions + SET error_code = error->>'code', + error_message = error->>'message' + WHERE error IS NOT NULL + """ + ) + op.drop_column("executions", "error") diff --git a/backend/alembic/versions/20260428_000000_dd4ee5ff6aa7_add_traces_and_observations_tables.py b/backend/alembic/versions/20260428_000000_dd4ee5ff6aa7_add_traces_and_observations_tables.py new file mode 100644 index 000000000..5fb163d04 --- /dev/null +++ b/backend/alembic/versions/20260428_000000_dd4ee5ff6aa7_add_traces_and_observations_tables.py @@ -0,0 +1,80 @@ +"""add traces and observations tables""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from alembic import op + +revision 
= "dd4ee5ff6aa7" +down_revision = "cc3dd4ee5ff6" + + +def upgrade(): + op.create_table( + "traces", + sa.Column("id", UUID(as_uuid=True), primary_key=True), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("workspace_id", UUID(as_uuid=True), nullable=False), + sa.Column("start_time", sa.DateTime(timezone=True), nullable=False), + sa.Column("end_time", sa.DateTime(timezone=True)), + sa.Column("status", sa.String(20), nullable=False, server_default="running"), + sa.Column("input", JSONB), + sa.Column("output", JSONB), + sa.Column("metadata", JSONB), + sa.Column("environment", sa.String(50), server_default="debug"), + sa.Column("tags", sa.ARRAY(sa.String), server_default="{}"), + sa.Column("release", sa.String(255)), + sa.Column("version", sa.String(100)), + sa.Column("session_id", sa.String(255)), + sa.Column("bookmarked", sa.Boolean, server_default="false"), + sa.Column("public", sa.Boolean, server_default="false"), + sa.Column("total_observations", sa.Integer, server_default="0"), + sa.Column("total_tokens", sa.Integer, server_default="0"), + sa.Column("total_cost", sa.Numeric(12, 6)), + sa.Column("duration_ms", sa.Integer), + sa.Column("execution_id", UUID(as_uuid=True), nullable=False), + sa.Column("agent_version_id", UUID(as_uuid=True), nullable=False), + sa.Column("user_id", UUID(as_uuid=True), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now()), + ) + op.create_index("ix_traces_workspace_created", "traces", ["workspace_id", "created_at"]) + op.create_index("ix_traces_execution", "traces", ["execution_id"], unique=True) + op.create_index("ix_traces_session", "traces", ["session_id", "created_at"]) + + op.create_table( + "observations", + sa.Column("id", UUID(as_uuid=True), primary_key=True), + sa.Column("trace_id", UUID(as_uuid=True), sa.ForeignKey("traces.id", ondelete="CASCADE"), nullable=False), + sa.Column("parent_observation_id", UUID(as_uuid=True)), + sa.Column("type", sa.String(20), nullable=False), + sa.Column("name", sa.String(500), nullable=False), + sa.Column("level", sa.String(10), nullable=False, server_default="DEFAULT"), + sa.Column("status_message", sa.Text), + sa.Column("environment", sa.String(50), server_default="debug"), + sa.Column("start_time", sa.DateTime(timezone=True), nullable=False), + sa.Column("end_time", sa.DateTime(timezone=True)), + sa.Column("completion_start_time", sa.DateTime(timezone=True)), + sa.Column("input", JSONB), + sa.Column("output", JSONB), + sa.Column("metadata", JSONB), + sa.Column("model", sa.String(100)), + sa.Column("model_parameters", JSONB), + sa.Column("usage_details", JSONB), + sa.Column("cost_details", JSONB), + sa.Column("prompt_name", sa.String(255)), + sa.Column("prompt_version", sa.Integer), + sa.Column("tool_definitions", JSONB), + sa.Column("tool_calls", JSONB), + sa.Column("tool_call_names", sa.ARRAY(sa.String)), + sa.Column("execution_id", UUID(as_uuid=True), nullable=False), + sa.Column("workspace_id", UUID(as_uuid=True), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now()), + ) + op.create_index("ix_observations_trace_time", "observations", ["trace_id", "start_time"]) + op.create_index("ix_observations_parent", "observations", ["parent_observation_id"]) + op.create_index("ix_observations_trace_type", "observations", ["trace_id", "type"]) + + +def downgrade(): + op.drop_table("observations") + op.drop_table("traces") diff --git a/backend/alembic/versions/20260429_000000_ee5ff6aa7bb8_extend_release_status.py 
b/backend/alembic/versions/20260429_000000_ee5ff6aa7bb8_extend_release_status.py new file mode 100644 index 000000000..67c0bc9d6 --- /dev/null +++ b/backend/alembic/versions/20260429_000000_ee5ff6aa7bb8_extend_release_status.py @@ -0,0 +1,56 @@ +"""extend agent_release_status enum with active and superseded + +Revision ID: ee5ff6aa7bb8 +Revises: dd4ee5ff6aa7 +Create Date: 2026-04-29 00:00:00.000000+00:00 + +Changes: +- agent_release_status enum: add 'active' and 'superseded' values +- Backfill: each agent's active_release_id row -> 'active' +- Backfill: sibling 'ready' rows (same agent, has active sibling) -> 'superseded' +""" + +from typing import Sequence, Union + +from alembic import op + +revision: str = "ee5ff6aa7bb8" +down_revision: Union[str, None] = "dd4ee5ff6aa7" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + with op.get_context().autocommit_block(): + op.execute("ALTER TYPE agent_release_status ADD VALUE IF NOT EXISTS 'active'") + op.execute("ALTER TYPE agent_release_status ADD VALUE IF NOT EXISTS 'superseded'") + + op.execute( + """ + UPDATE agent_releases r + SET status = 'active' + FROM agents a + WHERE a.active_release_id = r.id + AND r.status = 'ready' + """ + ) + + op.execute( + """ + UPDATE agent_releases r + SET status = 'superseded' + FROM agent_versions v + WHERE r.agent_version_id = v.id + AND r.status = 'ready' + AND EXISTS ( + SELECT 1 FROM agent_releases r2 + JOIN agent_versions v2 ON r2.agent_version_id = v2.id + WHERE v2.agent_id = v.agent_id + AND r2.status = 'active' + ) + """ + ) + + +def downgrade() -> None: + op.execute("UPDATE agent_releases SET status = 'ready' WHERE status IN ('active', 'superseded')") diff --git a/backend/alembic/versions/20260429_100000_ff6aa7bb8cc9_drop_observation_tool_call_names.py b/backend/alembic/versions/20260429_100000_ff6aa7bb8cc9_drop_observation_tool_call_names.py new file mode 100644 index 000000000..60261dc41 --- /dev/null +++ b/backend/alembic/versions/20260429_100000_ff6aa7bb8cc9_drop_observation_tool_call_names.py @@ -0,0 +1,31 @@ +"""drop dead tool_call_names column from observations + +Revision ID: ff6aa7bb8cc9 +Revises: ee5ff6aa7bb8 +Create Date: 2026-04-29 10:00:00.000000+00:00 + +Changes: +- Drop tool_call_names column from observations table (never populated) +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +revision: str = "ff6aa7bb8cc9" +down_revision: Union[str, None] = "ee5ff6aa7bb8" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.drop_column("observations", "tool_call_names") + + +def downgrade() -> None: + op.add_column( + "observations", + sa.Column("tool_call_names", sa.ARRAY(sa.String), nullable=True), + ) diff --git a/backend/alembic/versions/20260430_000000_1a2b3c4d5e6f_unify_agent_run_metadata.py b/backend/alembic/versions/20260430_000000_1a2b3c4d5e6f_unify_agent_run_metadata.py new file mode 100644 index 000000000..92b8ff12b --- /dev/null +++ b/backend/alembic/versions/20260430_000000_1a2b3c4d5e6f_unify_agent_run_metadata.py @@ -0,0 +1,89 @@ +"""unify agent run metadata + +Revision ID: 1a2b3c4d5e6f +Revises: ff6aa7bb8cc9 +Create Date: 2026-04-30 18:32:00.000000 + +""" + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "1a2b3c4d5e6f" +down_revision = "ff6aa7bb8cc9" +branch_labels = None +depends_on = None + + +def upgrade(): + # 1.
Add new columns + op.add_column("agent_runs", sa.Column("trigger_medium", sa.String(length=20), nullable=True)) + op.add_column("agent_runs", sa.Column("run_purpose", sa.String(length=20), nullable=True)) + + # 2. Data Migration: single-pass CASE to map old trigger_source → new axes + op.execute(""" + UPDATE agent_runs SET + trigger_medium = CASE trigger_source + WHEN 'task' THEN 'system' + WHEN 'chat' THEN 'api' + WHEN 'api' THEN 'api' + WHEN 'scheduler' THEN 'scheduler' + WHEN 'draft_test' THEN 'ui' + WHEN 'draft_copilot' THEN 'ui' + WHEN 'copilot' THEN 'ui' + WHEN 'debug' THEN 'ui' + ELSE 'api' + END, + run_purpose = CASE trigger_source + WHEN 'task' THEN 'production' + WHEN 'chat' THEN 'production' + WHEN 'api' THEN 'production' + WHEN 'scheduler' THEN 'production' + WHEN 'draft_test' THEN 'draft_test' + WHEN 'draft_copilot' THEN 'internal_builder' + WHEN 'copilot' THEN 'internal_builder' + WHEN 'debug' THEN 'debug' + ELSE 'production' + END + """) + + # 3. Make new columns non-nullable + op.alter_column("agent_runs", "trigger_medium", existing_type=sa.String(length=20), nullable=False) + op.alter_column("agent_runs", "run_purpose", existing_type=sa.String(length=20), nullable=False) + + # 4. Drop old column + op.drop_column("agent_runs", "trigger_source") + + # 5. Clean up old executor kinds and definition kinds + op.execute("UPDATE executions SET executor_kind = 'build_copilot' WHERE executor_kind = 'copilot'") + op.execute( + "UPDATE agent_versions SET definition_kind = 'sandbox_cli' WHERE definition_kind IN ('claude_code', 'codex', 'openclaw')" + ) + + +def downgrade(): + # 1. Add old column + op.add_column("agent_runs", sa.Column("trigger_source", sa.String(length=20), nullable=True)) + + # 2. Revert Data Migration: single-pass CASE + op.execute(""" + UPDATE agent_runs SET + trigger_source = CASE + WHEN trigger_medium = 'system' AND run_purpose = 'production' THEN 'task' + WHEN trigger_medium = 'api' AND run_purpose = 'production' THEN 'chat' + WHEN trigger_medium = 'scheduler' AND run_purpose = 'production' THEN 'scheduler' + WHEN trigger_medium = 'ui' AND run_purpose = 'draft_test' THEN 'draft_test' + WHEN trigger_medium = 'ui' AND run_purpose = 'internal_builder' THEN 'draft_copilot' + WHEN trigger_medium = 'ui' AND run_purpose = 'debug' THEN 'debug' + ELSE 'api' + END + """) + + # 3. Make old column non-nullable + op.alter_column("agent_runs", "trigger_source", existing_type=sa.String(length=20), nullable=False) + + # 4. Drop new columns + op.drop_column("agent_runs", "run_purpose") + op.drop_column("agent_runs", "trigger_medium") diff --git a/backend/alembic/versions/20260430_010000_b2af1f3e0215_refactor_agent_kinds.py b/backend/alembic/versions/20260430_010000_b2af1f3e0215_refactor_agent_kinds.py new file mode 100644 index 000000000..eb1caee86 --- /dev/null +++ b/backend/alembic/versions/20260430_010000_b2af1f3e0215_refactor_agent_kinds.py @@ -0,0 +1,68 @@ +"""refactor_agent_kinds + +Revision ID: b2af1f3e0215 +Revises: 1a2b3c4d5e6f +Create Date: 2026-04-30 + +Renames definition_kind->engine_kind, executor_kind->engine_kind. +Remaps values: graph->langgraph_visual, code->langgraph_code, +sandbox_cli->(split by runtime_binding), graph/code runtime->server. +""" + +from alembic import op + +revision = "b2af1f3e0215" +down_revision = "1a2b3c4d5e6f" +branch_labels = None +depends_on = None + + +def upgrade(): + # 1. 
agent_versions: rename column + remap values + op.alter_column("agent_versions", "definition_kind", new_column_name="engine_kind") + op.execute("UPDATE agent_versions SET engine_kind = 'langgraph_visual' WHERE engine_kind = 'graph'") + op.execute("UPDATE agent_versions SET engine_kind = 'langgraph_code' WHERE engine_kind = 'code'") + # sandbox_cli -> split by runtime_binding from linked releases (UPDATE FROM avoids correlated subquery) + op.execute(""" + UPDATE agent_versions av + SET engine_kind = COALESCE(ar.runtime_type, 'claude_code') + FROM ( + SELECT DISTINCT ON (agent_version_id) + agent_version_id, + runtime_binding->>'runtime_type' AS runtime_type + FROM agent_releases + ORDER BY agent_version_id, created_at DESC + ) ar + WHERE ar.agent_version_id = av.id + AND av.engine_kind = 'sandbox_cli' + """) + # Any sandbox_cli rows with no releases default to claude_code + op.execute(""" + UPDATE agent_versions + SET engine_kind = 'claude_code' + WHERE engine_kind = 'sandbox_cli' + """) + + # 2. agent_releases: remap runtime_kind values + op.execute("UPDATE agent_releases SET runtime_kind = 'server' WHERE runtime_kind IN ('graph', 'code')") + + # 3. executions: rename column (values already correct: claude_code, codex, openclaw, build_copilot) + op.alter_column("executions", "executor_kind", new_column_name="engine_kind") + + +def downgrade(): + # 3. executions: restore column name + op.alter_column("executions", "engine_kind", new_column_name="executor_kind") + + # 2. agent_releases: restore runtime_kind values (best-effort) + # Cannot distinguish graph vs code from runtime_kind alone, default to graph + op.execute("UPDATE agent_releases SET runtime_kind = 'graph' WHERE runtime_kind = 'server'") + + # 1. agent_versions: restore column name + remap values + op.execute("UPDATE agent_versions SET engine_kind = 'graph' WHERE engine_kind = 'langgraph_visual'") + op.execute("UPDATE agent_versions SET engine_kind = 'code' WHERE engine_kind = 'langgraph_code'") + op.execute(""" + UPDATE agent_versions SET engine_kind = 'sandbox_cli' + WHERE engine_kind IN ('claude_code', 'codex', 'openclaw') + """) + op.alter_column("agent_versions", "engine_kind", new_column_name="definition_kind") diff --git a/backend/app/MODEL.md b/backend/app/MODEL.md index 88794a092..ba30a3b45 100644 --- a/backend/app/MODEL.md +++ b/backend/app/MODEL.md @@ -227,12 +227,14 @@ backend/app/ - `POST /api/v1/models/instances` - 创建模型实例配置 - `GET /api/v1/models/instances` - 获取模型实例配置列表 - `POST /api/v1/models/test-output` - 测试模型输出 +- `POST /api/v1/models/test-output-stream` - 测试模型输出(SSE 流式) ### 模型供应商 API - `GET /api/v1/model-providers` - 获取所有供应商列表 - `GET /api/v1/model-providers/{provider_name}` - 获取单个供应商详情 - `POST /api/v1/model-providers/sync` - 同步供应商、模型和认证信息 +- `POST /api/v1/model-providers/custom` - 添加自定义供应商 ### 模型凭据 API @@ -279,7 +281,7 @@ backend/app/ ## 注意事项 1. **全局可见性**: 当前实现中,模型实例和凭据对所有用户和工作空间可见(user_id 和 workspace_id 为 NULL 的记录) -2. **认证**: 当前代码中用户认证部分被注释,使用匿名用户ID,后续需要恢复认证机制 +2. **认证**: 所有 API 端点使用 `get_current_user` 依赖进行用户认证 3. **同步机制**: 供应商和模型信息通过工厂模式从代码同步到数据库 4. 
**凭据管理**: 凭据加密存储,解密操作只在 Service 层进行 @@ -290,7 +292,6 @@ backend/app/ - [Repository 层文档](./repositories/MODEL.md) - [Model 层文档](./models/MODEL.md) - [Core 层文档](./core/model/MODEL.md) -- [Core 层详细文档](./core/model/README_CN.md) ## 未来改进 diff --git a/backend/app/SKILL.md b/backend/app/SKILL.md index 36b46ff9e..386826ab4 100644 --- a/backend/app/SKILL.md +++ b/backend/app/SKILL.md @@ -132,7 +132,7 @@ Model Layer (models/skill.py) **权限检查逻辑:** ```python if skill.owner_id and skill.owner_id != current_user_id and not skill.is_public: - raise ForbiddenException("You don't have permission to access this skill") + raise AccessDeniedError("You don't have permission to access this skill") ``` ### 3. 创建技能 @@ -335,9 +335,9 @@ if skill.owner_id and skill.owner_id != current_user_id and not skill.is_public: 技能模块使用以下自定义异常: -- `NotFoundException`: 资源不存在(如技能或文件不存在) -- `ForbiddenException`: 权限不足(如非拥有者尝试修改技能) -- `BadRequestException`: 请求参数错误(如同名技能已存在) +- `NotFoundError`: 资源不存在(如技能或文件不存在) +- `AccessDeniedError`: 权限不足(如非拥有者尝试修改技能) +- `InvalidRequestError`: 请求参数错误(如同名技能已存在) 所有异常都在 `SkillService` 层抛出,由 API 层的全局异常处理器统一处理。 diff --git a/backend/app/api/schemas.py b/backend/app/api/schemas.py index 6a00aac60..88b7159b3 100644 --- a/backend/app/api/schemas.py +++ b/backend/app/api/schemas.py @@ -4,54 +4,36 @@ from pydantic import BaseModel, ConfigDict, Field -class BadRequestResponse(BaseModel): - model_config = ConfigDict(json_schema_extra={"example": {"detail": "Bad request", "error_code": "BAD_REQUEST"}}) - - detail: str = Field(..., description="Error detail message") - error_code: Optional[str] = Field(None, description="Error code for categorization") - - -class NotFoundResponse(BaseModel): - model_config = ConfigDict(json_schema_extra={"example": {"detail": "Not found", "error_code": "NOT_FOUND"}}) - - detail: str = Field(..., description="Error detail message") - error_code: Optional[str] = Field(None, description="Error code for categorization") - - -class UnauthorizedResponse(BaseModel): - model_config = ConfigDict( - json_schema_extra={"example": {"detail": "Unauthorized access", "error_code": "UNAUTHORIZED"}} - ) - - detail: str = Field(..., description="Error detail message") - error_code: Optional[str] = Field(None, description="Error code for categorization") - - -class UnauthenticatedResponse(BaseModel): +class AppErrorPayloadSchema(BaseModel): model_config = ConfigDict( - json_schema_extra={"example": {"detail": "Unauthenticated access", "error_code": "UNAUTHENTICATED"}} + json_schema_extra={ + "example": { + "code": "BAD_REQUEST", + "message": "请求错误", + "data": {"detail": "Bad request"}, + "source": "api", + "retryable": False, + } + } ) - detail: str = Field(..., description="Error detail message") - error_code: Optional[str] = Field(None, description="Error code for categorization") - - -class ValidationErrorResponse(BaseModel): - model_config = ConfigDict( - json_schema_extra={"example": {"detail": "Validation error", "error_code": "VALIDATION_ERROR"}} + code: str = Field(..., description="Stable application error code") + message: str = Field(..., description="User-facing error summary") + data: Optional[dict] = Field(None, description="Structured error metadata") + source: str = Field("internal", description="Error origin: api, engine, runtime, auth, validation, etc.") + retryable: bool = Field(False, description="Whether the client should retry the request") + user_action: Optional[str] = Field( + None, description="Suggested user action: retry, configure_model, relogin, fix_input, 
contact_support" ) + detail: Optional[str] = Field(None, description="Detailed diagnostic message") - detail: str = Field(..., description="Error detail message") - error_code: Optional[str] = Field(None, description="Error code for categorization") - - -class InternalServerErrorResponse(BaseModel): - model_config = ConfigDict( - json_schema_extra={"example": {"detail": "Internal server error", "error_code": "INTERNAL_SERVER_ERROR"}} - ) - detail: str = Field(..., description="Error detail message") - error_code: Optional[str] = Field(None, description="Error code for categorization") +BadRequestResponse = AppErrorPayloadSchema +NotFoundResponse = AppErrorPayloadSchema +UnauthorizedResponse = AppErrorPayloadSchema +UnauthenticatedResponse = AppErrorPayloadSchema +ValidationErrorResponse = AppErrorPayloadSchema +InternalServerErrorResponse = AppErrorPayloadSchema class HealthResponse(BaseModel): diff --git a/backend/app/api/v1/MODEL.md b/backend/app/api/v1/MODEL.md index 157f0195f..2f67306b3 100644 --- a/backend/app/api/v1/MODEL.md +++ b/backend/app/api/v1/MODEL.md @@ -50,6 +50,15 @@ - `workspaceId`: 工作空间ID(可选) - **返回**: 模型输出结果 +#### 5. 测试模型输出(流式) +- **端点**: `POST /api/v1/models/test-output-stream` +- **功能**: 以 SSE 流式方式测试指定模型的输出 +- **请求体**: + - `model_name`: 模型名称 + - `input`: 输入文本 + - `workspaceId`: 工作空间ID(可选) +- **返回**: SSE 事件流 + ### 模型供应商 API (`model_providers.py`) #### 1. 获取所有供应商列表 @@ -79,6 +88,11 @@ - 同步认证信息(从 .env 读取并同步到 model_credential 表,全局记录) - **返回**: 同步结果统计 +#### 4. 添加自定义供应商 +- **端点**: `POST /api/v1/model-providers/custom` +- **功能**: 一步创建自定义供应商(含 provider + credential + model_instance) +- **返回**: 创建结果 + ### 模型凭据 API (`model_credentials.py`) #### 1. 创建或更新凭据 @@ -140,7 +154,7 @@ Repository 层 (ModelInstanceRepository, ModelProviderRepository, ModelCredentia ## 注意事项 -1. **认证**: 当前代码中用户认证部分被注释,使用匿名用户ID,后续需要恢复认证机制 +1. **认证**: 所有 API 端点使用 `get_current_user` 依赖进行用户认证 2. **全局可见性**: 模型实例和凭据对所有用户和工作空间可见(user_id 和 workspace_id 为 NULL 的记录) 3. **凭据加密**: 所有凭据在存储前都会进行加密处理 4. 
**同步机制**: 供应商和模型信息通过工厂模式从代码同步到数据库 diff --git a/backend/app/api/v1/__init__.py b/backend/app/api/v1/__init__.py index 8bd3e1831..50d7db723 100644 --- a/backend/app/api/v1/__init__.py +++ b/backend/app/api/v1/__init__.py @@ -6,15 +6,15 @@ from fastapi import APIRouter +from .agent_runs import router as agent_runs_router +from .agents import router as agents_router from .artifacts import router as artifacts_router from .auth import router as auth_router -from .conversations import router as conversations_router +from .copilot import router as copilot_router from .custom_tools import router as custom_tools_router from .environment import router as environment_router +from .executions import router as executions_router from .files import router as files_router -from .graph_code import router as graph_code_router -from .graph_deployments import router as graph_deployments_router -from .graphs import router as graphs_router from .mcp import router as mcp_router from .memory import router as memory_router from .model_credentials import router as model_credentials_router @@ -22,39 +22,36 @@ from .model_usage import router as model_usage_router from .models import router as models_router from .oauth import router as oauth_router -from .openapi_graph import router as openapi_graph_router from .openclaw_chat import router as openclaw_chat_router from .openclaw_devices import router as openclaw_devices_router from .openclaw_instances import router as openclaw_instances_router from .openclaw_proxy import router as openclaw_proxy_router from .organizations import router as organizations_router -from .runs import router as runs_router from .sandboxes import router as sandboxes_router from .skill_collaborators import router as skill_collaborators_router from .skill_versions import router as skill_versions_router from .skills import router as skills_router +from .task_activities import router as task_activities_router +from .tasks import router as tasks_router +from .threads import router as threads_router from .tokens import router as tokens_router from .tools import router as tools_router from .traces import router as traces_router from .users import router as users_router from .version import router as version_router -from .workspace_files import router as workspace_files_router -from .workspace_folders import router as workspace_folders_router from .workspaces import router as workspaces_router ROUTERS = [ sandboxes_router, auth_router, artifacts_router, - conversations_router, files_router, memory_router, oauth_router, organizations_router, - runs_router, + agent_runs_router, + copilot_router, workspaces_router, - workspace_folders_router, - workspace_files_router, custom_tools_router, tools_router, mcp_router, @@ -62,13 +59,10 @@ model_credentials_router, models_router, model_usage_router, - graph_code_router, - graph_deployments_router, skills_router, skill_versions_router, skill_collaborators_router, tokens_router, - graphs_router, traces_router, users_router, environment_router, @@ -76,8 +70,12 @@ openclaw_chat_router, openclaw_devices_router, openclaw_proxy_router, - openapi_graph_router, version_router, + tasks_router, + executions_router, + agents_router, + task_activities_router, + threads_router, ] diff --git a/backend/app/api/v1/agent_runs.py b/backend/app/api/v1/agent_runs.py new file mode 100644 index 000000000..0f0ab2abe --- /dev/null +++ b/backend/app/api/v1/agent_runs.py @@ -0,0 +1,197 @@ +"""Agent Runs API.""" + +from __future__ import annotations + +import uuid +from typing import List 
+ +from fastapi import APIRouter, Depends, Query +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AccessDeniedError +from app.common.dependencies import CurrentUser, get_current_user, require_workspace_role +from app.core.database import get_db +from app.models.agent import Agent, AgentRelease, AgentVersion +from app.models.auth import AuthUser as User +from app.models.workspace import WorkspaceMemberRole +from app.schemas import BaseResponse +from app.schemas.agent_run import ( + AgentRunResponse, + CreateAgentRunRequest, + CreateDraftAgentRunRequest, +) +from app.services.agent_run_service import AgentRunService +from app.services.dispatch_service import DispatchService +from app.services.workspace_permission import check_workspace_access + +router = APIRouter(prefix="/v1/runs", tags=["Runs"]) + + +def _to_response(run) -> AgentRunResponse: + return AgentRunResponse.model_validate(run) + + +async def _get_release_workspace_id(db: AsyncSession, release_id: uuid.UUID) -> uuid.UUID | None: + return ( + await db.execute( + select(Agent.workspace_id) + .join(AgentVersion, AgentVersion.agent_id == Agent.id) + .join(AgentRelease, AgentRelease.agent_version_id == AgentVersion.id) + .where(AgentRelease.id == release_id) + ) + ).scalar_one_or_none() + + +async def _require_workspace_access( + db: AsyncSession, + workspace_id: uuid.UUID, + current_user: User, + role: WorkspaceMemberRole, +) -> None: + has_access = await check_workspace_access(db, workspace_id, current_user, role) + if not has_access: + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") + + +@router.get("", response_model=BaseResponse[List[AgentRunResponse]]) +async def list_runs( + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID | None = Query(None), + release_id: uuid.UUID | None = Query(None), + task_id: uuid.UUID | None = Query(None), + agent_id: uuid.UUID | None = Query(None), + trigger_medium: str | None = Query(None), + run_purpose: str | None = Query(None), + status: str | None = Query(None), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[AgentRunResponse]]: + """List runs filtered by workspace_id, release_id, task_id, or agent_id.""" + service = AgentRunService(db) + runs = await service.list_runs( + workspace_id=workspace_id, + release_id=release_id, + task_id=task_id, + agent_id=agent_id, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + status=status, + ) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[_to_response(r) for r in runs], + ) + + +@router.post("", response_model=BaseResponse[AgentRunResponse]) +async def create_run( + request: CreateAgentRunRequest, + current_user: CurrentUser, + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentRunResponse]: + """Create a new agent run via the unified orchestrator.""" + workspace_id = await _get_release_workspace_id(db, request.release_id) + if not workspace_id: + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") + await _require_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.member) + + dispatch = DispatchService(db) + run = await dispatch.dispatch_direct( + release_id=request.release_id, + prompt=request.goal or "", + user_id=str(current_user.id), + trigger_medium=request.trigger_medium, + run_purpose=request.run_purpose, + thread_id=request.thread_id, + task_id=request.task_id, + 
input_payload=request.input_payload, + ) + return BaseResponse( + success=True, + code=200, + msg="Run created", + data=_to_response(run), + ) + + +@router.post("/draft", response_model=BaseResponse[AgentRunResponse]) +async def create_draft_run( + request: CreateDraftAgentRunRequest, + current_user: CurrentUser, + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentRunResponse]: + """Create a Test Lab run against an agent draft version, not an active release.""" + await _require_workspace_access(db, request.workspace_id, current_user, WorkspaceMemberRole.member) + + dispatch = DispatchService(db) + run = await dispatch.dispatch_draft( + agent_id=request.agent_id, + version_id=request.version_id, + prompt=request.goal or "", + user_id=str(current_user.id), + workspace_id=request.workspace_id, + input_payload=request.input_payload, + ) + return BaseResponse( + success=True, + code=200, + msg="Draft run created", + data=_to_response(run), + ) + + +@router.get("/{run_id}", response_model=BaseResponse[AgentRunResponse]) +async def get_run( + run_id: uuid.UUID, + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentRunResponse]: + """Get a run by ID.""" + service = AgentRunService(db) + run = await service.get_run(run_id) + await _require_workspace_access(db, run.workspace_id, current_user, WorkspaceMemberRole.viewer) + return BaseResponse(success=True, code=200, msg="ok", data=_to_response(run)) + + +@router.post("/{run_id}/cancel", response_model=BaseResponse[AgentRunResponse]) +async def cancel_run( + run_id: uuid.UUID, + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentRunResponse]: + """Cancel a run.""" + service = AgentRunService(db) + existing_run = await service.get_run(run_id) + await _require_workspace_access( + db, + existing_run.workspace_id, + current_user, + WorkspaceMemberRole.member, + ) + + dispatch = DispatchService(db) + run = await dispatch.cancel_run(run_id) + return BaseResponse(success=True, code=200, msg="Run cancelled", data=_to_response(run)) + + +@router.post("/{run_id}/retry", response_model=BaseResponse[AgentRunResponse]) +async def retry_run( + run_id: uuid.UUID, + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentRunResponse]: + """Retry a run by creating a new execution attempt.""" + service = AgentRunService(db) + existing_run = await service.get_run(run_id) + await _require_workspace_access( + db, + existing_run.workspace_id, + current_user, + WorkspaceMemberRole.member, + ) + + dispatch = DispatchService(db) + run = await dispatch.retry_run(run_id, str(current_user.id)) + return BaseResponse(success=True, code=200, msg="Run retried", data=_to_response(run)) diff --git a/backend/app/api/v1/agents.py b/backend/app/api/v1/agents.py new file mode 100644 index 000000000..f0f1faa8e --- /dev/null +++ b/backend/app/api/v1/agents.py @@ -0,0 +1,337 @@ +"""Agents API.""" + +from __future__ import annotations + +import uuid +from typing import List, Optional + +from fastapi import APIRouter, Depends, Query +from pydantic import BaseModel as PydanticBaseModel +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AccessDeniedError +from app.common.dependencies import CurrentUser, require_workspace_role +from app.core.database import get_db +from app.models.agent import Agent, AgentRelease, AgentVersion +from app.models.auth import AuthUser as User +from 
app.models.workspace import WorkspaceMemberRole +from app.schemas import BaseResponse +from app.schemas.agent import ( + AgentResponse, + AgentSummary, + CreateAgentRequest, + UpdateAgentRequest, +) +from app.schemas.agent_release import ( + AgentReleaseResponse, + AgentReleaseSummary, +) +from app.schemas.agent_version import ( + AgentVersionResponse, + AgentVersionSummary, + CreateAgentVersionRequest, + UpdateAgentVersionRequest, +) +from app.services.agent_publish_service import AgentPublishService +from app.services.agent_release_service import AgentReleaseService +from app.services.agent_service import AgentService +from app.services.agent_version_service import AgentVersionService +from app.services.workspace_permission import check_workspace_access + + +class RollbackRequest(PydanticBaseModel): + release_id: uuid.UUID + + +class PublishAgentResponse(PydanticBaseModel): + agent: AgentResponse + release: AgentReleaseResponse + + +class RollbackAgentResponse(PydanticBaseModel): + agent: AgentResponse + + +class UnpublishAgentResponse(PydanticBaseModel): + agent: AgentResponse + release: Optional[AgentReleaseResponse] = None + + +router = APIRouter(prefix="/v1/agents", tags=["Agents"]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _to_response(agent: Agent) -> AgentResponse: + return AgentResponse.model_validate(agent) + + +def _to_summary(agent: Agent) -> AgentSummary: + return AgentSummary.model_validate(agent) + + +def _version_to_response(v: AgentVersion) -> AgentVersionResponse: + return AgentVersionResponse.model_validate(v) + + +def _version_to_summary(v: AgentVersion) -> AgentVersionSummary: + return AgentVersionSummary.model_validate(v) + + +def _release_to_response(r: AgentRelease) -> AgentReleaseResponse: + return AgentReleaseResponse.model_validate(r) + + +def _release_to_summary(r: AgentRelease) -> AgentReleaseSummary: + return AgentReleaseSummary.model_validate(r) + + +# --------------------------------------------------------------------------- +# Agent routes +# --------------------------------------------------------------------------- + + +@router.get("", response_model=BaseResponse[List[AgentSummary]]) +async def list_agents( + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[AgentSummary]]: + service = AgentService(db) + agents = await service.list_agents(workspace_id) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[_to_summary(a) for a in agents], + ) + + +@router.post("", response_model=BaseResponse[AgentResponse]) +async def create_agent( + request: CreateAgentRequest, + current_user: CurrentUser, + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentResponse]: + has_access = await check_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.member) + if not has_access: + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") + + service = AgentService(db) + agent = await service.create_agent(workspace_id, str(current_user.id), request) + return BaseResponse(success=True, code=200, msg="Agent created", data=_to_response(agent)) + + +@router.get("/{agent_id}", response_model=BaseResponse[AgentResponse]) +async def get_agent( + agent_id: uuid.UUID, + current_user: User = 
require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentResponse]: + service = AgentService(db) + agent = await service.get_agent(agent_id) + return BaseResponse(success=True, code=200, msg="ok", data=_to_response(agent)) + + +@router.patch("/{agent_id}", response_model=BaseResponse[AgentResponse]) +async def update_agent( + agent_id: uuid.UUID, + request: UpdateAgentRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentResponse]: + service = AgentService(db) + agent = await service.update_agent(agent_id, request) + return BaseResponse(success=True, code=200, msg="Agent updated", data=_to_response(agent)) + + +@router.delete("/{agent_id}", response_model=BaseResponse) +async def delete_agent( + agent_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse: + service = AgentService(db) + await service.delete_agent(agent_id) + return BaseResponse(success=True, code=200, msg="Agent deleted") + + +# --------------------------------------------------------------------------- +# AgentVersion sub-routes +# --------------------------------------------------------------------------- + + +@router.get("/{agent_id}/versions", response_model=BaseResponse[List[AgentVersionSummary]]) +async def list_versions( + agent_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[AgentVersionSummary]]: + service = AgentVersionService(db) + versions = await service.list_versions(agent_id) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[_version_to_summary(v) for v in versions], + ) + + +@router.post("/{agent_id}/versions", response_model=BaseResponse[AgentVersionResponse]) +async def create_version( + agent_id: uuid.UUID, + request: CreateAgentVersionRequest, + current_user: CurrentUser, + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentVersionResponse]: + has_access = await check_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.member) + if not has_access: + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") + + service = AgentVersionService(db) + version = await service.create_version(agent_id, str(current_user.id), request) + return BaseResponse(success=True, code=200, msg="Version created", data=_version_to_response(version)) + + +@router.get("/{agent_id}/versions/{version_id}", response_model=BaseResponse[AgentVersionResponse]) +async def get_version( + agent_id: uuid.UUID, + version_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[AgentVersionResponse]: + service = AgentVersionService(db) + version = await service.get_version(version_id) + return BaseResponse(success=True, code=200, msg="ok", data=_version_to_response(version)) + + +@router.patch("/{agent_id}/versions/{version_id}", response_model=BaseResponse[AgentVersionResponse]) +async def update_version( + agent_id: uuid.UUID, + version_id: uuid.UUID, + request: UpdateAgentVersionRequest, + current_user: 
+
+
+@router.patch("/{agent_id}/versions/{version_id}", response_model=BaseResponse[AgentVersionResponse])
+async def update_version(
+    agent_id: uuid.UUID,
+    version_id: uuid.UUID,
+    request: UpdateAgentVersionRequest,
+    current_user: User = require_workspace_role(WorkspaceMemberRole.member),
+    workspace_id: uuid.UUID = Query(...),
+    db: AsyncSession = Depends(get_db),
+) -> BaseResponse[AgentVersionResponse]:
+    service = AgentVersionService(db)
+    version = await service.update_version(version_id, request, user_id=current_user.id)
+    return BaseResponse(success=True, code=200, msg="Version updated", data=_version_to_response(version))
+
+
+# ---------------------------------------------------------------------------
+# AgentRelease sub-routes
+# ---------------------------------------------------------------------------
+
+
+@router.get("/{agent_id}/releases", response_model=BaseResponse[List[AgentReleaseSummary]])
+async def list_releases(
+    agent_id: uuid.UUID,
+    current_user: User = require_workspace_role(WorkspaceMemberRole.viewer),
+    workspace_id: uuid.UUID = Query(...),
+    db: AsyncSession = Depends(get_db),
+) -> BaseResponse[List[AgentReleaseSummary]]:
+    service = AgentReleaseService(db)
+    releases = await service.list_releases(agent_id)
+    return BaseResponse(
+        success=True,
+        code=200,
+        msg="ok",
+        data=[_release_to_summary(r) for r in releases],
+    )
+
+
+@router.get("/{agent_id}/releases/{release_id}", response_model=BaseResponse[AgentReleaseResponse])
+async def get_release(
+    agent_id: uuid.UUID,
+    release_id: uuid.UUID,
+    current_user: User = require_workspace_role(WorkspaceMemberRole.viewer),
+    workspace_id: uuid.UUID = Query(...),
+    db: AsyncSession = Depends(get_db),
+) -> BaseResponse[AgentReleaseResponse]:
+    service = AgentReleaseService(db)
+    release = await service.get_release(release_id)
+    return BaseResponse(success=True, code=200, msg="ok", data=_release_to_response(release))
+
+
+@router.post("/{agent_id}/releases/{release_id}/retire", response_model=BaseResponse[AgentReleaseResponse])
+async def retire_release(
+    agent_id: uuid.UUID,
+    release_id: uuid.UUID,
+    current_user: User = require_workspace_role(WorkspaceMemberRole.admin),
+    workspace_id: uuid.UUID = Query(...),
+    db: AsyncSession = Depends(get_db),
+) -> BaseResponse[AgentReleaseResponse]:
+    service = AgentPublishService(db)
+    result = await service.retire(agent_id, release_id)
+    return BaseResponse(success=True, code=200, msg="ok", data=_release_to_response(result["release"]))
+
+
+@router.post("/{agent_id}/publish", response_model=BaseResponse[PublishAgentResponse])
+async def publish_agent(
+    agent_id: uuid.UUID,
+    current_user: User = require_workspace_role(WorkspaceMemberRole.admin),
+    workspace_id: uuid.UUID = Query(...),
+    db: AsyncSession = Depends(get_db),
+) -> BaseResponse[PublishAgentResponse]:
+    service = AgentPublishService(db)
+    result = await service.publish(agent_id, current_user.id)
+    return BaseResponse(
+        success=True,
+        code=200,
+        msg="ok",
+        data=PublishAgentResponse(
+            agent=_to_response(result["agent"]),
+            release=_release_to_response(result["release"]),
+        ),
+    )
+
+
+@router.post("/{agent_id}/rollback", response_model=BaseResponse[RollbackAgentResponse])
+async def rollback_agent(
+    agent_id: uuid.UUID,
+    body: RollbackRequest,
+    current_user: User = require_workspace_role(WorkspaceMemberRole.admin),
+    workspace_id: uuid.UUID = Query(...),
+    db: AsyncSession = Depends(get_db),
+) -> BaseResponse[RollbackAgentResponse]:
+    service = AgentPublishService(db)
+    result = await service.rollback(agent_id, body.release_id)
+    return BaseResponse(
+        success=True,
+        code=200,
+        msg="ok",
+        data=RollbackAgentResponse(
+            agent=_to_response(result["agent"]),
+        ),
+    )
+
+
+@router.post("/{agent_id}/unpublish",
response_model=BaseResponse[UnpublishAgentResponse]) +async def unpublish_agent( + agent_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.admin), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[UnpublishAgentResponse]: + service = AgentPublishService(db) + result = await service.unpublish(agent_id) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=UnpublishAgentResponse( + agent=_to_response(result["agent"]), + release=_release_to_response(result["release"]) if result["release"] else None, + ), + ) diff --git a/backend/app/api/v1/artifacts.py b/backend/app/api/v1/artifacts.py index ebb532e2e..ad27d995a 100644 --- a/backend/app/api/v1/artifacts.py +++ b/backend/app/api/v1/artifacts.py @@ -13,8 +13,8 @@ from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AppError, InternalServiceError, InvalidRequestError, NotFoundError from app.common.dependencies import CurrentUser -from app.common.exceptions import AppException, BadRequestException, InternalServerException, NotFoundException from app.common.response import success_response from app.core.agent.artifacts import ArtifactResolver, FileInfo, RunInfo from app.core.database import get_db @@ -91,7 +91,11 @@ async def download_artifact_file( """Download or preview a file from the run. Returns file with appropriate Content-Type.""" path = resolver.get_file_path(str(current_user.id), thread_id, run_id, file_path) if path is None: - raise NotFoundException("File not found or path invalid") + raise NotFoundError( + "File not found or path invalid", + code="ARTIFACT_FILE_NOT_FOUND", + data={"thread_id": thread_id, "run_id": run_id, "file_path": file_path}, + ) filename = path.name media_type, _ = mimetypes.guess_type(str(path)) return FileResponse( @@ -115,7 +119,7 @@ async def live_read_file( user_id = str(current_user.id) record = await service.get_user_sandbox_record(user_id) if not record: - raise NotFoundException("No sandbox found") + raise NotFoundError("No sandbox found", code="SANDBOX_NOT_FOUND", data={"user_id": user_id}) handle = None adapter = await _sandbox_pool.get(record.id) @@ -129,7 +133,7 @@ async def live_read_file( adapter = handle.adapter except Exception as e: logger.warning(f"Sandbox reconnect failed for user {user_id}: {e}", exc_info=True) - raise NotFoundException("Sandbox not running") + raise NotFoundError("Sandbox not running", code="SANDBOX_NOT_RUNNING", data={"user_id": user_id}) try: raw_read = getattr(adapter, "raw_read", None) @@ -138,13 +142,21 @@ async def live_read_file( else: content = adapter.read(file_path) if content.startswith("[Error:") or content.startswith("Error:"): - raise NotFoundException(content) + raise NotFoundError( + content, + code="ARTIFACT_FILE_NOT_FOUND", + data={"thread_id": thread_id, "file_path": file_path}, + ) return PlainTextResponse(content) - except AppException: + except AppError: raise except Exception as e: logger.warning(f"Live read failed for {file_path}: {e}") - raise InternalServerException(f"Failed to read file: {e}") + raise InternalServiceError( + "Failed to read file", + code="ARTIFACT_FILE_READ_FAILED", + data={"thread_id": thread_id, "file_path": file_path}, + ) from e finally: if handle: await handle.release() @@ -162,5 +174,9 @@ async def delete_artifact_run( """Delete all artifacts for the given run.""" ok = resolver.delete_run(str(current_user.id), thread_id, run_id) if not ok: - raise BadRequestException("Delete 
failed or path invalid") + raise InvalidRequestError( + "Delete failed or path invalid", + code="ARTIFACT_RUN_DELETE_FAILED", + data={"thread_id": thread_id, "run_id": run_id}, + ) return success_response(message="Run artifacts deleted", data={"run_id": run_id}) diff --git a/backend/app/api/v1/auth.py b/backend/app/api/v1/auth.py index a82f35946..2c7fff64b 100644 --- a/backend/app/api/v1/auth.py +++ b/backend/app/api/v1/auth.py @@ -10,7 +10,7 @@ from pydantic import BaseModel, ConfigDict, EmailStr, Field from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import AppException, UnauthorizedException +from app.common.app_errors import AppError, AuthenticationError from app.common.response import success_response from app.core.database import AsyncSessionLocal, get_db from app.core.rate_limit import auth_rate_limit, strict_rate_limit @@ -217,7 +217,7 @@ async def logout( try: current_user = await _get_current_auth_user(token, db, request) user_id = current_user.id - except AppException: + except AppError: logger.debug("Failed to resolve current user during logout", exc_info=True) if refresh_token and user_id: @@ -357,7 +357,7 @@ async def get_session( # Pass request to read token from Cookie current_user = await _get_current_auth_user(token, db, request) return success_response(data={"user": _user_to_response(current_user)}) - except AppException: + except AppError: # Return null user when unauthenticated return success_response(data={"user": None}) @@ -429,7 +429,7 @@ async def refresh_token( except Exception: logger.debug("Failed to refresh token via cookie refresh_token", exc_info=True) - raise UnauthorizedException("Invalid or expired refresh token") + raise AuthenticationError("Invalid or expired refresh token", code="REFRESH_TOKEN_INVALID") # Helpers @@ -437,7 +437,7 @@ async def refresh_token( def _extract_bearer(auth_header: Optional[str]) -> str: if not auth_header or not auth_header.lower().startswith("bearer "): - raise UnauthorizedException("Missing bearer token") + raise AuthenticationError("Missing bearer token", code="BEARER_TOKEN_MISSING") return auth_header.split(" ", 1)[1] @@ -450,7 +450,7 @@ async def _get_current_auth_user( if auth_header: try: token = _extract_bearer(auth_header) - except UnauthorizedException: + except AuthenticationError: logger.debug("Failed to extract bearer token from Authorization header", exc_info=True) if not token and request: @@ -462,7 +462,7 @@ async def _get_current_auth_user( logger.debug("Failed to read token from cookies", exc_info=True) if not token: - raise UnauthorizedException("Missing credentials") + raise AuthenticationError("Missing credentials", code="MISSING_CREDENTIALS") user_service = AuthService(db) @@ -472,7 +472,7 @@ async def _get_current_auth_user( user = await user_service.get_user_by_id(str(user_id)) if user and user.is_active: return user - raise UnauthorizedException("User not found or inactive") + raise AuthenticationError("User not found or inactive", code="USER_INVALID") session_service = AuthSessionService(db) session = await session_service.get_session_by_token(token) @@ -480,9 +480,9 @@ async def _get_current_auth_user( user = await user_service.user_repo.get(uuid.UUID(session.user_id)) if user and user.is_active: return user - raise UnauthorizedException("User not found or inactive") + raise AuthenticationError("User not found or inactive", code="USER_INVALID") - raise UnauthorizedException("Invalid or expired token") + raise AuthenticationError("Invalid or expired token", 
code="TOKEN_INVALID") def _user_to_response(user: AuthUser) -> UserResponse: diff --git a/backend/app/api/v1/chat.py b/backend/app/api/v1/chat.py deleted file mode 100644 index f7770f06b..000000000 --- a/backend/app/api/v1/chat.py +++ /dev/null @@ -1,537 +0,0 @@ -""" -Module: Chat API (Production Ready) - -Overview: -- Streaming helper module reused by the chat WebSocket handler -- Provide LangGraph event dispatch, state queries, message persistence, and result archival -- No longer exposes a `/v1/chat` HTTP endpoint - -Dependencies: -- Database: async SQLAlchemy session -- LangGraph: v2 event stream processing -- WebSocket chat handler: `app.websocket.chat_ws_handler` -""" - -import asyncio -import uuid -from typing import Any, AsyncGenerator, Dict - -from langchain.messages import AIMessage -from langchain_core.messages.base import BaseMessage -from langchain_core.runnables import RunnableConfig -from loguru import logger -from sqlalchemy import select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.core.database import AsyncSessionLocal -from app.core.settings import settings -from app.models import Conversation, Message -from app.utils.datetime import utc_now -from app.utils.file_event_emitter import FileEventEmitter -from app.utils.stream_event_handler import StreamEventHandler, StreamState - -# LangGraph control-flow exception: do not mark trace as FAILED -try: - from langgraph.errors import GraphBubbleUp -except ImportError: - GraphBubbleUp = None # type: ignore[misc, assignment] - - -async def safe_get_state( - graph: Any, config: RunnableConfig, max_retries: int = 3, initial_delay: float = 0.1, log: Any = None -) -> Any: - """ - Safely retrieve graph state with retry logic to avoid connection conflicts. - - Args: - graph: LangGraph graph instance - config: RunnableConfig configuration - max_retries: maximum number of retries - initial_delay: initial delay in seconds, doubled on each retry - log: optional logger - - Returns: - Graph state snapshot - - Raises: - Exception: if all retries are exhausted - """ - if log is None: - log = logger - - last_error = None - delay = initial_delay - - for attempt in range(max_retries): - try: - snap = await graph.aget_state(config) - return snap - except Exception as e: - last_error = e - error_msg = str(e) - - # check for connection conflict error - is_connection_error = ( - "another command is already in progress" in error_msg.lower() or "connection" in error_msg.lower() - ) - - # last attempt — stop retrying - if attempt >= max_retries - 1: - break - - # connection error — wait and retry - if is_connection_error: - log.debug( - f"Connection conflict detected (attempt {attempt + 1}/{max_retries}), " - f"retrying after {delay:.2f}s delay" - ) - await asyncio.sleep(delay) - delay *= 2 # exponential backoff - else: - # non-connection error — log warning but still retry (may be transient) - log.warning(f"Failed to get state (attempt {attempt + 1}/{max_retries}): {e}") - await asyncio.sleep(delay) - delay *= 2 - - # all retries exhausted - log.error(f"Failed to get state after {max_retries} attempts: {last_error}") - if last_error is not None: - raise last_error - raise RuntimeError("Failed to get state after all retries") - - -# ==================== Persistence Logic ==================== - - -async def save_run_result( - thread_id: str, - state: StreamState, - log, - *, - graph_id: str | None = None, - workspace_id: str | None = None, - user_id: str | None = None, - graph_name: str | None = None, -) -> None: - """ - Persist run 
results. - - Use a fresh DB session to ensure the connection is available even when - called from a finally block. Also batch-persist Trace + Observations. - """ - # --- 1. persist messages --- - if state.assistant_content or state.all_messages: - if not state.all_messages and state.assistant_content: - log.warning(f"Using fallback content accumulation for thread {thread_id}") - state.all_messages = [AIMessage(content=state.assistant_content)] - - if state.all_messages: - try: - async with AsyncSessionLocal() as session: - await save_assistant_message(thread_id, state.all_messages, session, update_conversation=True) - log.info(f"Persisted messages for thread {thread_id}") - except asyncio.CancelledError: - log.warning(f"Save run result cancelled for thread {thread_id}") - except Exception as e: - log.error(f"Failed to persist messages for thread {thread_id}: {e}") - - # --- 2. persist Trace + Observations (transaction-safe) --- - all_observations = state.get_all_observations() - if all_observations: - try: - await _persist_trace_data( - state, - log, - observations=all_observations, - graph_id=graph_id, - workspace_id=workspace_id, - user_id=user_id, - graph_name=graph_name, - ) - except asyncio.CancelledError: - log.debug(f"Trace persistence cancelled for thread {thread_id}") - except Exception as e: - log.warning(f"Failed to persist trace data for thread {thread_id}: {e}") - - -async def _persist_trace_data( - state: StreamState, - log, - *, - observations: list | None = None, - graph_id: str | None = None, - workspace_id: str | None = None, - user_id: str | None = None, - graph_name: str | None = None, -) -> None: - """ - Batch-write accumulated Observation data from StreamState to the database. - - Transaction-safe: uses session.begin() for atomicity. - Incomplete observations are marked INTERRUPTED by state.get_all_observations(). 
- """ - from datetime import datetime, timezone - - from app.models.execution_trace import ( - ExecutionObservation, - ExecutionTrace, - ObservationLevel, - ObservationStatus, - ObservationType, - TraceStatus, - ) - from app.utils.stream_event_handler import ObsLevel, ObsStatus, ObsType - - all_obs = observations if observations is not None else state.get_all_observations() - if not all_obs: - return - - # determine trace status - if state.has_error: - trace_status = TraceStatus.FAILED - elif state.interrupted: - trace_status = TraceStatus.INTERRUPTED - elif state.stopped: - trace_status = TraceStatus.FAILED - else: - trace_status = TraceStatus.COMPLETED - - now = datetime.now(timezone.utc) - trace_start = datetime.fromtimestamp(state.trace_start_time / 1000, tz=timezone.utc) - duration_ms = int(now.timestamp() * 1000 - state.trace_start_time) - - # aggregate token statistics - total_tokens = 0 - for obs_rec in all_obs: - if obs_rec.type == ObsType.GENERATION and obs_rec.total_tokens: - total_tokens += obs_rec.total_tokens - - # build ExecutionTrace ORM object - trace_uuid = uuid.UUID(state.trace_id) - trace = ExecutionTrace( - id=trace_uuid, - workspace_id=uuid.UUID(workspace_id) if workspace_id else None, - graph_id=uuid.UUID(graph_id) if graph_id else None, - thread_id=state.thread_id, - user_id=user_id, - name=graph_name or "graph_execution", - status=trace_status, - start_time=trace_start, - end_time=now, - duration_ms=duration_ms, - total_tokens=total_tokens or None, - ) - - # enum mapping - type_map = { - ObsType.SPAN: ObservationType.SPAN, - ObsType.GENERATION: ObservationType.GENERATION, - ObsType.TOOL: ObservationType.TOOL, - ObsType.EVENT: ObservationType.EVENT, - } - level_map = { - ObsLevel.DEBUG: ObservationLevel.DEBUG, - ObsLevel.DEFAULT: ObservationLevel.DEFAULT, - ObsLevel.WARNING: ObservationLevel.WARNING, - ObsLevel.ERROR: ObservationLevel.ERROR, - } - status_map = { - ObsStatus.RUNNING: ObservationStatus.RUNNING, - ObsStatus.COMPLETED: ObservationStatus.COMPLETED, - ObsStatus.FAILED: ObservationStatus.FAILED, - ObsStatus.INTERRUPTED: ObservationStatus.INTERRUPTED, - } - - # build ExecutionObservation ORM objects - db_observations = [] - for rec in all_obs: - obs = ExecutionObservation( - id=uuid.UUID(rec.id), - trace_id=trace_uuid, - parent_observation_id=uuid.UUID(rec.parent_observation_id) if rec.parent_observation_id else None, - type=type_map.get(rec.type, ObservationType.EVENT), - name=rec.name, - level=level_map.get(rec.level, ObservationLevel.DEFAULT), - status=status_map.get(rec.status, ObservationStatus.COMPLETED), - status_message=rec.status_message, - start_time=datetime.fromtimestamp(rec.start_time / 1000, tz=timezone.utc), - end_time=datetime.fromtimestamp(rec.end_time / 1000, tz=timezone.utc) if rec.end_time else None, - duration_ms=rec.duration_ms, - completion_start_time=( - datetime.fromtimestamp(rec.completion_start_time / 1000, tz=timezone.utc) - if rec.completion_start_time - else None - ), - input=rec.input_data, - output=rec.output_data, - model_name=rec.model_name, - model_provider=rec.model_provider, - model_parameters=rec.model_parameters, - prompt_tokens=rec.prompt_tokens, - completion_tokens=rec.completion_tokens, - total_tokens=rec.total_tokens, - metadata_=rec.metadata, - version=rec.version, - ) - db_observations.append(obs) - - # transaction-safe batch insert - async with AsyncSessionLocal() as session: - async with session.begin(): - session.add(trace) - session.add_all(db_observations) - # commit is automatic when begin() context exits - 
log.info(f"Persisted trace {state.trace_id} with {len(db_observations)} observations | thread={state.thread_id}") - - -# ==================== Database Operations ==================== - - -async def get_or_create_conversation( - thread_id: str | None, - message: str, - user_id: str, - metadata: dict | None, - db: AsyncSession, -) -> tuple[str, Conversation]: - if not thread_id: - # No thread_id provided, create new conversation - thread_id = str(uuid.uuid4()) - conversation = Conversation( - thread_id=thread_id, - user_id=user_id, - title=message[:50] if len(message) > 50 else message, - meta_data=metadata or {}, - ) - db.add(conversation) - await db.commit() - return thread_id, conversation - else: - # Thread_id provided, try to find existing conversation - result = await db.execute( - select(Conversation).where(Conversation.thread_id == thread_id, Conversation.user_id == user_id) - ) - conv = result.scalar_one_or_none() - if not conv: - # Conversation not found - create new one with the provided thread_id - # This allows frontend to generate thread_id and let backend create conversation on first message - conversation = Conversation( - thread_id=thread_id, - user_id=user_id, - title=message[:50] if len(message) > 50 else message, - meta_data=metadata or {}, - ) - db.add(conversation) - await db.commit() - await db.refresh(conversation) - return thread_id, conversation - return thread_id, conv - - -async def get_user_config(user_id: str, thread_id: str): - """Retrieve user configuration (RunnableConfig for LangGraph).""" - from app.core.agent.langfuse_callback import get_langfuse_callbacks - from app.core.trace_context import get_trace_id - - config: RunnableConfig = { - "configurable": {"thread_id": thread_id, "user_id": str(user_id), "trace_id": get_trace_id()}, - "recursion_limit": 300, - "callbacks": get_langfuse_callbacks(enabled=settings.langfuse_enabled), - } - - return config, {} - - -async def save_user_message(thread_id: str, message: str, metadata: dict | None, db: AsyncSession): - user_message = Message( - thread_id=thread_id, - role="user", - content=message, - meta_data=metadata or {}, - ) - db.add(user_message) - await db.commit() - - -async def save_assistant_message( - thread_id: str, messages: list[BaseMessage], db: AsyncSession, update_conversation: bool = True -): - """Save assistant message, extracting tool calls if present.""" - # find the last AI message - ai_msg = next((m for m in reversed(messages) if isinstance(m, AIMessage)), None) - if not ai_msg: - return - - meta_data = dict(ai_msg.additional_kwargs) if ai_msg.additional_kwargs else {} - - # extract tool calls (simplified — a strict implementation would match subsequent ToolMessages by ID) - if hasattr(ai_msg, "tool_calls") and ai_msg.tool_calls: - tool_calls_data = [] - for tc in ai_msg.tool_calls: - tool_calls_data.append({"name": tc.get("name"), "arguments": tc.get("args"), "id": tc.get("id")}) - meta_data["tool_calls"] = tool_calls_data - - message = Message( - thread_id=thread_id, - role="assistant", - content=str(ai_msg.content) if ai_msg.content else "", - meta_data=meta_data, - ) - db.add(message) - - if update_conversation: - result = await db.execute(select(Conversation).where(Conversation.thread_id == thread_id)) - if conv := result.scalar_one_or_none(): - conv.updated_at = utc_now() - await db.commit() - - -async def _clear_interrupt_marker(thread_id: str, log: Any) -> None: - """Clear the interrupted_graph_id marker from Conversation metadata.""" - try: - async with AsyncSessionLocal() as session: 
- result_query = await session.execute(select(Conversation).where(Conversation.thread_id == thread_id)) - if conv := result_query.scalar_one_or_none(): - if conv.meta_data and "interrupted_graph_id" in conv.meta_data: - del conv.meta_data["interrupted_graph_id"] - await session.commit() - log.debug(f"Cleared interrupt marker from conversation | thread_id={thread_id}") - except asyncio.CancelledError: - log.debug(f"Clear interrupt marker cancelled for thread {thread_id} (connection closing)") - except Exception as e: - log.warning(f"Failed to clear interrupt marker for conversation | thread_id={thread_id} | error={e}") - - -# ==================== Message Enrichment ==================== - - -def _enrich_message(message: str, metadata: dict, *, is_new_thread: bool, log, endpoint: str) -> str: - """Append edit_skill_id context (first message only) and file info to user message.""" - enriched = message - - # Only inject editing context on the first message of a new thread - edit_skill_id = metadata.get("edit_skill_id") - if edit_skill_id and is_new_thread: - log.info(f"[{endpoint}] edit-skill mode: edit_skill_id={edit_skill_id}") - enriched += ( - f"\n\n[Editing Mode] The user wants to modify an existing skill (ID: {edit_skill_id}). " - f"The skill files have been pre-loaded into the sandbox. " - f"Read the existing files first, then apply the user's requested changes." - ) - - files = metadata.get("files", []) - if files: - log.info(f"[{endpoint}] found {len(files)} attached file(s): {files}") - file_lines = "\n".join([f"- {f['filename']}: {f['path']}" for f in files]) - enriched += f"\n\nAttached files:\n{file_lines}\nUse the read_file tool to read the content of these files." - log.info(f"[{endpoint}] message enriched with file paths, length={len(enriched)}") - - return enriched - - -# ==================== Event Dispatch Helpers ==================== - - -def _extract_run_ids(event_dict: dict) -> tuple[str, str | None]: - """ - Extract run_id and parent_run_id from a LangGraph v2 event. - - Each LangGraph v2 astream_events event contains: - - run_id: unique identifier for the event (UUID or str) - - parent_ids: list ordered from root to immediate parent - - All values are normalised to str to avoid UUID-as-dict-key issues. - """ - raw_run_id = event_dict.get("run_id") - run_id = str(raw_run_id) if raw_run_id else "" - parent_ids = event_dict.get("parent_ids", []) - parent_run_id = str(parent_ids[-1]) if parent_ids else None - return run_id, parent_run_id - - -async def _dispatch_stream_event( - event: Any, - handler: StreamEventHandler, - state: StreamState, - file_emitter: FileEventEmitter | None = None, -) -> AsyncGenerator[str, None]: - """ - Translate a single LangGraph v2 astream_events event into SSE strings. - - Yields zero or more SSE strings. Callers: ``async for sse in _dispatch_stream_event(...): yield sse``. - file_emitter is only passed by chat_stream (not chat_resume). 
- """ - event_dict: dict[str, Any] - if isinstance(event, dict): - event_dict = event # type: ignore[assignment] - else: - event_dict = {"event": str(type(event).__name__), "data": event} if event else {} - - event_type = event_dict.get("event") - event_name = event_dict.get("name", "") - metadata = event_dict.get("metadata", {}) if isinstance(event_dict.get("metadata"), dict) else {} - langgraph_node = metadata.get("langgraph_node") - - is_node_event = langgraph_node is not None or ( - event_name - and "node" in event_name.lower() - and "tool" not in event_name.lower() - and "model" not in event_name.lower() - and "llm" not in event_name.lower() - and "chat" not in event_name.lower() - ) - - run_id, parent_run_id = _extract_run_ids(event_dict) - - if event_type == "on_chat_model_start": - yield await handler.handle_chat_model_start(event_dict, state, run_id, parent_run_id) - - elif event_type == "on_chat_model_stream": - if sse := await handler.handle_chat_model_stream(event_dict, state, run_id, parent_run_id): - yield sse - - elif event_type == "on_chat_model_end": - yield await handler.handle_chat_model_end(event_dict, state, run_id, parent_run_id) - - elif event_type == "on_tool_start": - yield await handler.handle_tool_start(event_dict, state, run_id, parent_run_id) - - elif event_type == "on_tool_end": - yield await handler.handle_tool_end(event_dict, state, run_id, parent_run_id) - - elif event_type == "on_chain_start" and is_node_event: - yield await handler.handle_node_start(event_dict, state, run_id, parent_run_id) - - elif event_type == "on_chain_end": - if is_node_event: - result = await handler.handle_node_end(event_dict, state, run_id, parent_run_id) - if isinstance(result, list): - for event_str in result: - if event_str and event_str.strip(): - yield event_str.strip() + "\n\n" - elif isinstance(result, str) and result.strip(): - yield result - - data_raw: Any = event_dict.get("data", {}) - data: Dict[str, Any] = data_raw if isinstance(data_raw, dict) else {} # type: ignore[assignment] - output = data.get("output") if isinstance(data, dict) else None - if output and isinstance(output, dict) and "messages" in output: - msgs = output["messages"] - from langgraph.types import Overwrite - - state.all_messages = msgs.value if isinstance(msgs, Overwrite) else msgs - - # Drain file events (chat_stream only) - if file_emitter is not None: - for file_evt in file_emitter.drain(): - yield handler.format_sse( - "file_event", - { - "action": file_evt.action, - "path": file_evt.path, - "size": file_evt.size, - "timestamp": file_evt.timestamp, - }, - state.thread_id, - state, - ) diff --git a/backend/app/api/v1/conversations.py b/backend/app/api/v1/conversations.py deleted file mode 100644 index 44357d34b..000000000 --- a/backend/app/api/v1/conversations.py +++ /dev/null @@ -1,927 +0,0 @@ -""" -Module: Conversations API - -Overview: -- Provides conversation create, query, update, delete (soft/hard delete) -- Provides message pagination, checkpoint retrieval, and conversation reset -- Supports conversation data export/import and full-text search -- Provides per-user conversation statistics - -Routes: -- POST /conversations: Create conversation -- GET /conversations: Get conversation list (paginated) -- DELETE /conversations/all: Delete all historical conversations (soft/hard) -- GET /conversations/{thread_id}: Get conversation details -- PATCH /conversations/{thread_id}: Update conversation -- DELETE /conversations/{thread_id}: Delete conversation (soft/hard) -- POST 
/conversations/{thread_id}/reset: Reset conversation (clear messages and checkpoints) -- GET /conversations/{thread_id}/messages: Get conversation messages (paginated) -- GET /conversations/{thread_id}/checkpoints: Get conversation checkpoints -- GET /conversations/{thread_id}/export: Export conversation (hidden from OpenAPI) -- POST /conversations/import: Import conversation (hidden from OpenAPI) -- POST /conversations/search: Search conversations and messages -- GET /conversations/users/stats: Get current user's conversation statistics - -Dependencies: -- Auth: CurrentUser -- Database: AsyncSession (Depends(get_db)) -- Graph: LangGraph checkpoints via checkpointer -- Utilities: utc_now, SQLAlchemy select/func, etc. - -Requests/Responses: -- Pagination: PaginationParams, PageResult[T] -- Conversation/Message models: ConversationCreate/Update/Response/DetailResponse, MessageResponse -- Others: CheckpointResponse, ConversationExportResponse, ConversationImportRequest, SearchRequest/Response, UserStatsResponse -- Unified response: BaseResponse[T] - -Error codes: -- 404: Conversation not found or not owned by current user -- 400: Invalid parameters or import/export failure -- 500: Internal server error -""" - -import uuid - -from fastapi import APIRouter, Depends, Query -from langchain_core.runnables import RunnableConfig -from loguru import logger -from sqlalchemy import delete, func, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import CurrentUser -from app.common.exceptions import InternalServerException, raise_not_found_error -from app.common.pagination import ConversationMessagesPaginationParams, PageResult, PaginationParams, Paginator -from app.core.agent.checkpointer.checkpointer import get_checkpointer -from app.core.database import get_db -from app.models import Conversation, Message -from app.schemas import ( - BaseResponse, - CheckpointResponse, - ConversationCreate, - ConversationDetailResponse, - ConversationExportResponse, - ConversationImportRequest, - ConversationMessageResponse, - ConversationResponse, - ConversationUpdate, - SearchRequest, - SearchResponse, - UserStatsResponse, -) -from app.utils.datetime import utc_now - -router = APIRouter(prefix="/v1/conversations", tags=["Conversations"]) - - -# ==================== Helper functions ==================== - - -async def verify_conversation_ownership(thread_id: str, user_id: str, db: AsyncSession) -> Conversation: - """Verify conversation ownership""" - result = await db.execute( - select(Conversation).where(Conversation.thread_id == thread_id, Conversation.user_id == user_id) - ) - conversation = result.scalar_one_or_none() - if not conversation: - raise_not_found_error("Conversation") - # At this point, conversation is guaranteed to be non-None - assert conversation is not None - return conversation - - -# ==================== Conversation management endpoints ==================== - - -@router.post( - "", - response_model=BaseResponse[ConversationResponse], - summary="Create conversation", - description="Create a new conversation for the current user.", - responses={ - 401: {"description": "Unauthorized"}, - 500: {"description": "Internal server error"}, - }, -) -async def create_conversation( - conv: ConversationCreate, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[ConversationResponse]: - """ - Create a new conversation - - Args: - conv: Conversation creation request - current_user: Current user - db: Database session - - Returns: - 
BaseResponse[ConversationResponse]: Conversation response - """ - conversation = Conversation( - thread_id=str(uuid.uuid4()), - user_id=current_user.id, # Use current user ID - title=conv.title, - meta_data=conv.metadata or {}, - ) - db.add(conversation) - await db.commit() - await db.refresh(conversation) - - return BaseResponse( - success=True, - code=201, - msg="Conversation created successfully", - data=ConversationResponse( - id=conversation.id, - thread_id=conversation.thread_id, - user_id=conversation.user_id, - title=conversation.title, - metadata=conversation.meta_data or {}, - created_at=conversation.created_at, - updated_at=conversation.updated_at, - message_count=0, - ), - ) - - -@router.get( - "", - response_model=BaseResponse[PageResult[ConversationResponse]], - summary="List conversations", - description="List the current user's conversations with pagination.", - responses={ - 401: {"description": "Unauthorized"}, - 500: {"description": "Internal server error"}, - }, -) -async def list_conversations( - current_user: CurrentUser, - page: int = Query(default=1, ge=1, description="Page number"), - page_size: int = Query(default=20, ge=1, le=100, description="Items per page"), - db: AsyncSession = Depends(get_db), -) -> BaseResponse[PageResult[ConversationResponse]]: - """ - Get the current user's conversation list - - Args: - current_user: Current user - page: Page number (starting from 1) - page_size: Number of items per page - db: Database session - - Returns: - BaseResponse[PageResult[ConversationResponse]]: Paginated conversation list - """ - # Create PaginationParams from query parameters - page_query = PaginationParams(page=page, page_size=page_size) - - paginator = Paginator(db) - page_result = await paginator.paginate( - select(Conversation) - .where(Conversation.user_id == current_user.id, Conversation.is_active == 1) - .order_by(Conversation.updated_at.desc()), - page_query, - ) - conversations = page_result.items - - response_list = [] - for conv in conversations: - # Get message count - count_result = await db.execute(select(func.count(Message.id)).where(Message.thread_id == conv.thread_id)) - message_count = count_result.scalar() or 0 - - response_list.append( - ConversationResponse( - id=conv.id, - thread_id=conv.thread_id, - user_id=conv.user_id, - title=conv.title, - metadata=conv.meta_data or {}, - created_at=conv.created_at, - updated_at=conv.updated_at, - message_count=message_count, - ) - ) - - return BaseResponse( - success=True, - code=200, - msg="Fetched conversation list successfully", - data=PageResult( - items=response_list, - total=page_result.total, - page=page_result.page, - page_size=page_result.page_size, - pages=page_result.pages, - ), - ) - - -@router.delete( - "/all", - summary="Delete all historical conversations", - description="Delete all conversations for the current user. 
Supports soft delete or hard delete.", - responses={ - 401: {"description": "Unauthorized"}, - 500: {"description": "Internal server error"}, - }, -) -async def delete_all_conversations( - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), - hard_delete: bool = True, -) -> BaseResponse: - """Delete all historical conversations for the current user - - Args: - current_user: Current authenticated user - db: Database session - hard_delete: Whether to hard delete (permanent), defaults to True - - Returns: - BaseResponse: Delete result - """ - # Get all conversations for the current user - result = await db.execute( - select(Conversation).where(Conversation.user_id == current_user.id, Conversation.is_active == 1) - ) - conversations = result.scalars().all() - - if not conversations: - return BaseResponse( - success=True, - code=200, - msg="No conversations to delete", - data={"deleted_count": 0}, - ) - - deleted_count = 0 - - if hard_delete: - # Hard delete: remove all conversations and related data - from app.core.agent.checkpointer.checkpointer import delete_thread_checkpoints - - for conversation in conversations: - try: - # delete checkpoints - await delete_thread_checkpoints(conversation.thread_id) - except Exception as e: - logger.warning(f"Failed to delete checkpoints for {conversation.thread_id}: {e}") - - # delete conversation (messages are cascade-deleted) - await db.delete(conversation) - deleted_count += 1 - else: - # Soft delete: mark all conversations as inactive - for conversation in conversations: - conversation.is_active = 0 - deleted_count += 1 - - await db.commit() - - return BaseResponse( - success=True, - code=200, - msg=f"Deleted {deleted_count} conversations successfully", - data={ - "deleted_count": deleted_count, - "hard_delete": hard_delete, - }, - ) - - -@router.get( - "/{thread_id}", - response_model=BaseResponse[ConversationDetailResponse], - summary="Get conversation details", - description="Get conversation details by thread_id for the current user.", - responses={ - 401: {"description": "Unauthorized"}, - 404: {"description": "Conversation not found"}, - 500: {"description": "Internal server error"}, - }, -) -async def get_conversation( - thread_id: str, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[ConversationDetailResponse]: - """ - Get a single conversation's details - - Args: - thread_id: Thread ID - current_user: Current user - db: Database session - - Returns: - BaseResponse[ConversationDetailResponse]: Conversation details - """ - # Verify conversation ownership - conversation = await verify_conversation_ownership(thread_id, current_user.id, db) - - messages_result = await db.execute( - select(Message).where(Message.thread_id == thread_id).order_by(Message.created_at) - ) - messages = messages_result.scalars().all() - - conv_response = ConversationResponse( - id=conversation.id, - thread_id=conversation.thread_id, - user_id=conversation.user_id, - title=conversation.title, - metadata=conversation.meta_data or {}, - created_at=conversation.created_at, - updated_at=conversation.updated_at, - message_count=len(messages), - ) - - messages_data = [ - { - "id": msg.id, - "role": msg.role, - "content": msg.content, - "metadata": msg.meta_data or {}, - "created_at": msg.created_at.isoformat(), - } - for msg in messages - ] - - return BaseResponse( - success=True, - code=200, - msg="Fetched conversation details successfully", - data=ConversationDetailResponse(conversation=conv_response, messages=messages_data), 
- ) - - -@router.patch( - "/{thread_id}", - response_model=BaseResponse[dict], - summary="Update conversation", - description="Update conversation title and/or metadata.", - responses={ - 401: {"description": "Unauthorized"}, - 404: {"description": "Conversation not found"}, - 500: {"description": "Internal server error"}, - }, -) -async def update_conversation( - thread_id: str, - update: ConversationUpdate, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[dict]: - """ - Update conversation information - - Args: - thread_id: Thread ID - update: Update payload - current_user: Current user - db: Database session - - Returns: - BaseResponse[dict]: Update status - """ - # Verify conversation ownership - conversation = await verify_conversation_ownership(thread_id, current_user.id, db) - - if update.title is not None: - conversation.title = update.title - if update.metadata is not None: - conversation.meta_data = update.metadata - - conversation.updated_at = utc_now() - await db.commit() - - return BaseResponse( - success=True, - code=200, - msg="Conversation updated successfully", - data={"status": "updated", "thread_id": thread_id}, - ) - - -@router.delete( - "/{thread_id}", - response_model=BaseResponse[dict], - summary="Delete conversation", - description="Delete a conversation (soft delete or hard delete). Hard delete removes all related data.", - responses={ - 401: {"description": "Unauthorized"}, - 404: {"description": "Conversation not found"}, - 500: {"description": "Internal server error"}, - }, -) -async def delete_conversation( - thread_id: str, - current_user: CurrentUser, - hard_delete: bool = True, - db: AsyncSession = Depends(get_db), -): - """ - Delete conversation (soft or hard delete), hard delete by default - - Args: - thread_id: Thread ID - hard_delete: Whether to hard delete - current_user: Current user - db: Database session - - Returns: - BaseResponse[dict]: Delete status - """ - # Verify conversation ownership - conversation = await verify_conversation_ownership(thread_id, current_user.id, db) - - if hard_delete: - # Hard delete: remove all related data - # Delete checkpoints first - from app.core.agent.checkpointer.checkpointer import delete_thread_checkpoints - - try: - await delete_thread_checkpoints(thread_id) - except Exception as e: - logger.warning(f"Failed to delete checkpoints: {e}") - - # Delete conversation (messages are cascade-deleted) - await db.delete(conversation) - else: - # Soft delete - conversation.is_active = 0 - - await db.commit() - return BaseResponse( - success=True, - code=200, - msg="Conversation deleted successfully", - data={"status": "deleted", "thread_id": thread_id}, - ) - - -@router.post( - "/{thread_id}/reset", - response_model=BaseResponse[dict], - summary="Reset conversation", - description="Clear all checkpoints and messages, but keep the conversation record.", - responses={ - 401: {"description": "Unauthorized"}, - 404: {"description": "Conversation not found"}, - 500: {"description": "Internal server error"}, - }, -) -async def reset_conversation( - thread_id: str, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[dict]: - """ - Reset conversation: clear all checkpoints and messages, but keep the conversation record - - After reset, the conversation returns to the initial state and can start over. 
- - Args: - thread_id: Thread ID - current_user: Current user - db: Database session - - Returns: - BaseResponse[dict]: Reset status - """ - # Verify conversation ownership - conversation = await verify_conversation_ownership(thread_id, current_user.id, db) - - try: - # 1. Delete LangGraph checkpoints - from app.core.agent.checkpointer.checkpointer import delete_thread_checkpoints - - await delete_thread_checkpoints(thread_id) - logger.info(f"✅ Deleted LangGraph checkpoints for thread: {thread_id}") - - # 2. Delete all message records - result = await db.execute(delete(Message).where(Message.thread_id == thread_id)) - # get deleted row count (SQLAlchemy 2.0+ Result has rowcount attribute) - deleted_count = getattr(result, "rowcount", 0) - logger.info(f"✅ Deleted {deleted_count} messages for thread: {thread_id}") - - # 3. Update conversation timestamp - conversation.updated_at = utc_now() - - await db.commit() - - return BaseResponse( - success=True, - code=200, - msg=f"Conversation reset; deleted {deleted_count} messages", - data={ - "status": "reset", - "thread_id": thread_id, - "deleted_count": deleted_count, - }, - ) - - except Exception as e: - await db.rollback() - logger.error(f"Failed to reset conversation {thread_id}: {e}") - raise InternalServerException("Failed to reset conversation") from e - - -# ==================== Message management endpoints ==================== - - -@router.get( - "/{thread_id}/messages", - response_model=BaseResponse[PageResult[ConversationMessageResponse]], - summary="List conversation messages", - description="Get a paginated list of messages in the conversation.", - responses={ - 401: {"description": "Unauthorized"}, - 404: {"description": "Conversation not found"}, - 500: {"description": "Internal server error"}, - }, -) -async def get_messages( - thread_id: str, - current_user: CurrentUser, - page_query: ConversationMessagesPaginationParams = Depends(), - db: AsyncSession = Depends(get_db), -) -> BaseResponse[PageResult[ConversationMessageResponse]]: - """ - Get conversation message history - - Args: - thread_id: Thread ID - current_user: Current user - page_query: Pagination parameters (page, page_size) - db: Database session - - Returns: - BaseResponse[PageResult[ConversationMessageResponse]]: Paginated message list - """ - # Verify conversation ownership - await verify_conversation_ownership(thread_id, current_user.id, db) - - paginator = Paginator(db) - page_result = await paginator.paginate( - select(Message).where(Message.thread_id == thread_id).order_by(Message.created_at.desc()), - page_query, - ) - messages = page_result.items - - message_list = [ - ConversationMessageResponse( - id=msg.id, - role=msg.role, - content=msg.content, - metadata=msg.meta_data or {}, - created_at=msg.created_at, - ) - for msg in reversed(list(messages)) - ] - - logger.debug(f"Loaded {len(message_list)} messages for thread {thread_id}") - - return BaseResponse( - success=True, - code=200, - msg="Fetched message list successfully", - data=PageResult( - items=message_list, - total=page_result.total, - page=page_result.page, - page_size=page_result.page_size, - pages=page_result.pages, - ), - ) - - -@router.get( - "/{thread_id}/checkpoints", - response_model=BaseResponse[CheckpointResponse], - summary="Get conversation checkpoints", - description="Retrieve checkpoints from LangGraph state history.", - responses={ - 401: {"description": "Unauthorized"}, - 404: {"description": "Conversation not found"}, - 500: {"description": "Internal server error"}, - }, -) -async def 
get_checkpoints( - thread_id: str, - current_user: CurrentUser, - limit: int = 10, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[CheckpointResponse]: - """ - Get all conversation checkpoints - - Args: - thread_id: Thread ID - limit: Number of checkpoints to return - - Returns: - BaseResponse[CheckpointResponse]: Checkpoints response - """ - # Verify conversation ownership - await verify_conversation_ownership(thread_id, current_user.id, db) - - config: RunnableConfig = {"configurable": {"thread_id": thread_id, "user_id": str(current_user.id)}} - try: - checkpointer = get_checkpointer() - if not checkpointer: - raise RuntimeError("Checkpointer not initialized") - checkpoints = [] - async for checkpoint_tuple in checkpointer.alist(config): - cp_config = checkpoint_tuple.config or {} - cp = checkpoint_tuple.checkpoint or {} - checkpoints.append( - { - "checkpoint_id": cp_config.get("configurable", {}).get("checkpoint_id"), - "values": cp.get("channel_values", {}), - "next": [], - "metadata": checkpoint_tuple.metadata, - "created_at": checkpoint_tuple.metadata.get("created_at") if checkpoint_tuple.metadata else None, - } - ) - if len(checkpoints) >= limit: - break - - return BaseResponse( - success=True, - code=200, - msg="Fetched checkpoints successfully", - data=CheckpointResponse(thread_id=thread_id, checkpoints=checkpoints), - ) - except Exception as e: - logger.error(f"Get checkpoints error: {e}") - raise InternalServerException("Failed to fetch checkpoints") from e - - -# ==================== Export/Import endpoints ==================== - - -@router.get("/{thread_id}/export", response_model=BaseResponse[ConversationExportResponse], include_in_schema=False) -async def export_conversation( - thread_id: str, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -): - """ - Export conversation data - - Args: - thread_id: Thread ID - current_user: Current user - db: Database session - - Returns: - BaseResponse[ConversationExportResponse]: Exported data - """ - # Verify conversation ownership - conversation = await verify_conversation_ownership(thread_id, current_user.id, db) - - messages_result = await db.execute( - select(Message).where(Message.thread_id == thread_id).order_by(Message.created_at) - ) - messages = messages_result.scalars().all() - - # Get LangGraph state - config: RunnableConfig = {"configurable": {"thread_id": thread_id, "user_id": str(current_user.id)}} - try: - checkpointer = get_checkpointer() - if checkpointer: - checkpoint_tuple = await checkpointer.aget_tuple(config) - if checkpoint_tuple and checkpoint_tuple.checkpoint: - state_values = checkpoint_tuple.checkpoint.get("channel_values", {}) - else: - state_values = None - else: - state_values = None - except Exception: - state_values = None - - return BaseResponse( - success=True, - code=200, - msg="Conversation exported successfully", - data=ConversationExportResponse( - conversation={ - "thread_id": conversation.thread_id, - "user_id": conversation.user_id, - "title": conversation.title, - "metadata": conversation.meta_data or {}, - "created_at": conversation.created_at.isoformat(), - "updated_at": conversation.updated_at.isoformat(), - }, - messages=[ - { - "role": msg.role, - "content": msg.content, - "metadata": msg.meta_data or {}, - "created_at": msg.created_at.isoformat(), - } - for msg in messages - ], - state=state_values, - ), - ) - - -@router.post("/import", include_in_schema=False) -async def import_conversation( - request: ConversationImportRequest, - current_user: CurrentUser, - 
db: AsyncSession = Depends( - get_db, - ), -): - """ - Import conversation data - - Args: - request: Import request - current_user: Current user - db: Database session - - Returns: - BaseResponse[dict]: Import status - """ - data = request.data - thread_id = str(uuid.uuid4()) - - # Create conversation - conversation = Conversation( - thread_id=thread_id, - user_id=current_user.id, - title=data["conversation"]["title"], - meta_data=data["conversation"].get("metadata", {}), - ) - db.add(conversation) - - # Import messages - for msg_data in data["messages"]: - message = Message( - thread_id=thread_id, - role=msg_data["role"], - content=msg_data["content"], - meta_data=msg_data.get("metadata", {}), - ) - db.add(message) - - await db.commit() - - # Restore LangGraph state (best-effort) - if "state" in data and data["state"]: - config: RunnableConfig = {"configurable": {"thread_id": thread_id, "user_id": str(current_user.id)}} - try: - checkpointer = get_checkpointer() - if checkpointer: - import uuid as _uuid - - from langgraph.checkpoint.base import empty_checkpoint - - checkpoint = empty_checkpoint() - checkpoint["id"] = str(_uuid.uuid4()) - checkpoint["channel_values"] = data["state"] - await checkpointer.aput(config, checkpoint, {"source": "input"}, {}) - except Exception as e: - logger.warning(f"Could not restore state: {e}") - - return BaseResponse( - success=True, - code=200, - msg="Conversation imported successfully", - data={"thread_id": thread_id, "status": "imported"}, - ) - - -# ==================== Search endpoints ==================== - - -@router.post( - "/search", - response_model=BaseResponse[SearchResponse], - summary="Search conversations and messages", - description="Search messages content and related conversation titles for the current user.", - responses={ - 401: {"description": "Unauthorized"}, - 500: {"description": "Internal server error"}, - }, -) -async def search_conversations( - request: SearchRequest, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[SearchResponse]: - """ - Search conversations and messages - - Args: - request: Search request - current_user: Current user - db: Database session - - Returns: - BaseResponse[SearchResponse]: Search results - """ - # Use SQLite LIKE search - result = await db.execute( - select(Message) - .join(Conversation, Message.thread_id == Conversation.thread_id) - .where(Message.content.like(f"%{request.query}%"), Conversation.user_id == current_user.id) - .order_by(Message.created_at.desc()) - .offset(request.skip) - .limit(request.limit) - ) - messages = result.scalars().all() - - results = [] - for msg in messages: - conv_result = await db.execute(select(Conversation).where(Conversation.thread_id == msg.thread_id)) - conversation = conv_result.scalar_one_or_none() - - results.append( - { - "message_id": msg.id, - "thread_id": msg.thread_id, - "conversation_title": conversation.title if conversation else "", - "role": msg.role, - "content": msg.content, - "created_at": msg.created_at.isoformat(), - } - ) - - return BaseResponse( - success=True, - code=200, - msg="Search completed", - data=SearchResponse(query=request.query, results=results), - ) - - -# ==================== Statistics endpoints ==================== - - -@router.get( - "/users/stats", - response_model=BaseResponse[UserStatsResponse], - summary="Get user statistics", - description="Get statistics about the current user's conversations and messages.", - responses={ - 401: {"description": "Unauthorized"}, - 500: {"description": 
"Internal server error"}, - }, -) -async def get_user_stats( - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[UserStatsResponse]: - """ - Get user statistics - - Args: - current_user: Current user - db: Database session - - Returns: - BaseResponse[UserStatsResponse]: User statistics - """ - # total conversations - conv_result = await db.execute( - select(func.count(Conversation.id)).where(Conversation.user_id == current_user.id, Conversation.is_active == 1) - ) - total_conversations = conv_result.scalar() or 0 - - # total messages - msg_result = await db.execute( - select(func.count(Message.id)) - .join(Conversation, Message.thread_id == Conversation.thread_id) - .where(Conversation.user_id == current_user.id) - ) - total_messages = msg_result.scalar() or 0 - - # recent conversations - recent_result = await db.execute( - select(Conversation) - .where(Conversation.user_id == current_user.id, Conversation.is_active == 1) - .order_by(Conversation.updated_at.desc()) - .limit(5) - ) - recent_conversations = recent_result.scalars().all() - - return BaseResponse( - success=True, - code=200, - msg="Fetched statistics successfully", - data=UserStatsResponse( - user_id=str(current_user.id), - total_conversations=total_conversations, - total_messages=total_messages, - recent_conversations=[ - {"thread_id": conv.thread_id, "title": conv.title, "updated_at": conv.updated_at.isoformat()} - for conv in recent_conversations - ], - ), - ) diff --git a/backend/app/api/v1/copilot.py b/backend/app/api/v1/copilot.py new file mode 100644 index 000000000..b66c8f11e --- /dev/null +++ b/backend/app/api/v1/copilot.py @@ -0,0 +1,48 @@ +"""Copilot API — execution-engine dispatch.""" + +from fastapi import APIRouter, Depends +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.dependencies import CurrentUser +from app.core.database import get_db +from app.schemas import BaseResponse +from app.schemas.copilot import CopilotRunRequest, CopilotRunResponse +from app.services.dispatch_service import DispatchService + +router = APIRouter(prefix="/v1/copilot", tags=["copilot"]) + + +@router.post("/run", response_model=BaseResponse[CopilotRunResponse]) +async def copilot_run( + body: CopilotRunRequest, + current_user: CurrentUser, + db: AsyncSession = Depends(get_db), +): + """Dispatch copilot through the execution engine for persistent history. + + Returns run_id + execution_id. Subscribe to the execution WebSocket + for real-time events (same as any other execution). 
+ """ + dispatch = DispatchService(db) + run = await dispatch.dispatch_copilot_draft( + agent_id=body.agent_id, + version_id=body.version_id, + workspace_id=body.workspace_id, + prompt=body.prompt, + user_id=str(current_user.id), + graph_context=body.graph_context, + conversation_history=body.conversation_history, + mode=body.mode, + provider_name=body.provider_name, + model_name=body.model_name, + ) + + return BaseResponse( + success=True, + code=200, + msg="Copilot run created", + data=CopilotRunResponse( + run_id=str(run.id), + execution_id=str(run.current_execution_id), + ), + ) diff --git a/backend/app/api/v1/custom_tools.py b/backend/app/api/v1/custom_tools.py index 96d24371e..ff7f8028f 100644 --- a/backend/app/api/v1/custom_tools.py +++ b/backend/app/api/v1/custom_tools.py @@ -13,6 +13,7 @@ from pydantic import BaseModel, ConfigDict, Field from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AccessDeniedError, NotFoundError from app.common.dependencies import get_current_user from app.core.database import get_db from app.models.auth import AuthUser as User @@ -95,10 +96,14 @@ async def get_custom_tool( service = CustomToolService(db) tool = await service.repo.get(tool_id) if not tool: - return {"success": False, "error": "Not found"} + raise NotFoundError("Custom tool not found", code="CUSTOM_TOOL_NOT_FOUND", data={"tool_id": str(tool_id)}) # verify ownership if tool.owner_id != current_user.id: - return {"success": False, "error": "Forbidden"} + raise AccessDeniedError( + "You can only view your own tools", + code="CUSTOM_TOOL_VIEW_FORBIDDEN", + data={"tool_id": str(tool_id)}, + ) return {"success": True, "data": _serialize(tool)} diff --git a/backend/app/api/v1/executions.py b/backend/app/api/v1/executions.py new file mode 100644 index 000000000..0808064c4 --- /dev/null +++ b/backend/app/api/v1/executions.py @@ -0,0 +1,257 @@ +"""Executions API.""" + +from __future__ import annotations + +import uuid +from typing import Any, Dict, List, Optional + +from fastapi import APIRouter, Depends, Query +from pydantic import BaseModel +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError +from app.common.dependencies import CurrentUser, get_current_user +from app.core.database import get_db +from app.models.agent_run import AgentRun +from app.models.auth import AuthUser as User +from app.models.execution import Artifact, Execution +from app.models.workspace import WorkspaceMemberRole +from app.schemas import BaseResponse +from app.schemas.artifact import ArtifactResponse +from app.schemas.execution import ( + ExecutionEventItemResponse, + ExecutionEventResponse, + ExecutionEventsPageResponse, + ExecutionResponse, +) +from app.schemas.task import InjectMessageRequest +from app.services.dispatch_service import DispatchService +from app.services.execution_service import ExecutionService +from app.services.workspace_permission import check_workspace_access + + +class DebugRunRequest(BaseModel): + agent_version_id: uuid.UUID + agent_id: uuid.UUID + prompt: str + workspace_id: uuid.UUID + variables: Optional[Dict[str, Any]] = None + + +router = APIRouter(prefix="/v1/executions", tags=["Executions"]) + + +@router.post("/debug", response_model=BaseResponse) +async def dispatch_debug_run( + body: DebugRunRequest, + current_user: CurrentUser, + db: AsyncSession = Depends(get_db), +) -> BaseResponse: + """Start a debug run with observation tracing 
enabled.""" + from app.services.execution_orchestrator import ExecutionOrchestrator + + has_access = await check_workspace_access(db, body.workspace_id, current_user, WorkspaceMemberRole.member) + if not has_access: + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") + + orchestrator = ExecutionOrchestrator(db) + run = await orchestrator.dispatch_debug( + agent_id=body.agent_id, + version_id=body.agent_version_id, + prompt=body.prompt, + user_id=str(current_user.id), + workspace_id=body.workspace_id, + variables=body.variables, + ) + + execution_id = run.current_execution_id + return BaseResponse( + success=True, + code=200, + msg="ok", + data={ + "execution_id": str(execution_id), + "run_id": str(run.id), + "ws_topic": f"execution:{execution_id}", + }, + ) + + +def _to_response(execution) -> ExecutionResponse: + return ExecutionResponse.model_validate(execution) + + +def _event_to_response(event) -> ExecutionEventResponse: + return ExecutionEventResponse.model_validate(event) + + +async def _get_run_workspace_id(db: AsyncSession, run_id: uuid.UUID) -> uuid.UUID | None: + return (await db.execute(select(AgentRun.workspace_id).where(AgentRun.id == run_id))).scalar_one_or_none() + + +async def _get_execution_workspace_id(db: AsyncSession, execution_id: uuid.UUID) -> uuid.UUID | None: + return ( + await db.execute( + select(AgentRun.workspace_id) + .join(Execution, Execution.run_id == AgentRun.id) + .where(Execution.id == execution_id) + ) + ).scalar_one_or_none() + + +async def _require_execution_workspace_access( + db: AsyncSession, + workspace_id: uuid.UUID, + current_user: User, + role: WorkspaceMemberRole, +) -> None: + has_access = await check_workspace_access(db, workspace_id, current_user, role) + if not has_access: + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") + + +@router.get("", response_model=BaseResponse[List[ExecutionResponse]]) +async def list_executions( + current_user: User = Depends(get_current_user), + run_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[ExecutionResponse]]: + """List all executions for a run.""" + workspace_id = await _get_run_workspace_id(db, run_id) + if not workspace_id: + raise NotFoundError("Run not found", code="RUN_NOT_FOUND", data={"run_id": str(run_id)}) + await _require_execution_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.viewer) + + service = ExecutionService(db) + executions = await service.list_executions(run_id) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[_to_response(e) for e in executions], + ) + + +@router.get("/{execution_id}", response_model=BaseResponse[ExecutionResponse]) +async def get_execution( + execution_id: uuid.UUID, + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ExecutionResponse]: + """Get an execution by ID.""" + workspace_id = await _get_execution_workspace_id(db, execution_id) + if not workspace_id: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + await _require_execution_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.viewer) + + service = ExecutionService(db) + execution = await service.get_execution(execution_id) + return BaseResponse(success=True, code=200, msg="ok", data=_to_response(execution)) + + +@router.get("/{execution_id}/events", 
response_model=BaseResponse[ExecutionEventsPageResponse]) +async def list_execution_events( + execution_id: uuid.UUID, + after_seq: int = Query(0, ge=0), + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ExecutionEventsPageResponse]: + """List execution events after a sequence number.""" + workspace_id = await _get_execution_workspace_id(db, execution_id) + if not workspace_id: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + await _require_execution_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.viewer) + + service = ExecutionService(db) + events = await service.list_events_after( + execution_id, + str(current_user.id), + after_seq=after_seq, + limit=500, + ) + items = [ + ExecutionEventItemResponse( + id=event.id, + execution_id=event.execution_id, + seq=event.sequence_no, + event_type=event.event_type, + payload=event.payload, + created_at=event.created_at, + ) + for event in events + ] + return BaseResponse( + success=True, + code=200, + msg="ok", + data=ExecutionEventsPageResponse( + execution_id=execution_id, + events=items, + next_after_seq=max((item.seq for item in items), default=after_seq), + ), + ) + + +@router.get("/{execution_id}/artifacts", response_model=BaseResponse[List[ArtifactResponse]]) +async def list_artifacts( + execution_id: uuid.UUID, + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[ArtifactResponse]]: + """List all artifacts for an execution.""" + workspace_id = await _get_execution_workspace_id(db, execution_id) + if not workspace_id: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + await _require_execution_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.viewer) + + result = await db.execute( + select(Artifact).where(Artifact.execution_id == execution_id).order_by(Artifact.created_at) + ) + artifacts = list(result.scalars().all()) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[ArtifactResponse.model_validate(a) for a in artifacts], + ) + + +@router.post("/{execution_id}/message", response_model=BaseResponse) +async def inject_message( + execution_id: uuid.UUID, + body: InjectMessageRequest, + current_user: User = Depends(get_current_user), + db: AsyncSession = Depends(get_db), +) -> BaseResponse: + """Inject a message into a running execution.""" + workspace_id = await _get_execution_workspace_id(db, execution_id) + if not workspace_id: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + + await _require_execution_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.member) + + dispatch = DispatchService(db) + try: + await dispatch.send_message(execution_id, body.message) + except NotImplementedError as exc: + raise InvalidRequestError(str(exc), code="EXECUTION_MESSAGE_UNSUPPORTED") + except RuntimeError as exc: + raise InvalidRequestError(str(exc), code="EXECUTION_MESSAGE_REJECTED") + return BaseResponse(success=True, code=200, msg="ok", data={"status": "sent"}) diff --git a/backend/app/api/v1/files.py b/backend/app/api/v1/files.py index bbd6558b7..66b932149 100644 --- a/backend/app/api/v1/files.py +++ b/backend/app/api/v1/files.py @@ -31,14 +31,16 @@ import asyncio import base64 import mimetypes +import mimetypes as mimetypes_module 
from pathlib import Path -from fastapi import APIRouter, File, Request, UploadFile +from fastapi import APIRouter, File, Query, Request, UploadFile +from fastapi.responses import Response from loguru import logger from pydantic import BaseModel +from app.common.app_errors import AppError, InternalServiceError, InvalidRequestError, NotFoundError from app.common.dependencies import CurrentUser -from app.common.exceptions import AppException, BadRequestException, InternalServerException, NotFoundException from app.core.agent.backends.constants import ( DEFAULT_WORKING_DIR, SANDBOX_UPLOADS_SUBDIR, @@ -166,8 +168,10 @@ def _validate_file_content(filename: str, content: bytes) -> None: content_start = content[: max(len(sig) for sig in expected_signatures)] if not any(content_start.startswith(sig) for sig in expected_signatures): logger.warning(f"File content validation failed for {filename}: got {content_start[:16].hex()}") - raise BadRequestException( + raise InvalidRequestError( f"File content does not match declared type: {file_ext}", + code="FILE_CONTENT_TYPE_MISMATCH", + data={"filename": filename, "file_type": file_ext}, ) @@ -175,7 +179,11 @@ def _validate_file_type(filename: str, content_type: str | None) -> None: """Validate file type (extension and MIME type).""" file_ext = Path(filename).suffix.lower() if file_ext and file_ext not in ALLOWED_EXTENSIONS: - raise BadRequestException(f"File type {file_ext} is not supported") + raise InvalidRequestError( + f"File type {file_ext} is not supported", + code="FILE_TYPE_UNSUPPORTED", + data={"filename": filename, "file_type": file_ext}, + ) if content_type: inferred_type, _ = mimetypes.guess_type(filename) if inferred_type and content_type != inferred_type: @@ -198,26 +206,32 @@ def _validate_file_upload( filename: str, content: bytes, content_type: str | None, -) -> tuple[str, None] | tuple[None, BadRequestException]: +) -> tuple[str, None] | tuple[None, InvalidRequestError]: """Validate file upload (size, type, content). 
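+
+    Checks run in this order: non-empty content, size limit
+    (MAX_FILE_SIZE_BYTES), filename sanitization via sanitize_filename,
+    extension/MIME validation, then magic-byte content validation.
+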
Returns (safe_filename, None) or (None, error).""" if len(content) == 0: - return None, BadRequestException("File cannot be empty") + return None, InvalidRequestError( + "File cannot be empty", + code="FILE_EMPTY", + data={"filename": filename}, + ) if len(content) > MAX_FILE_SIZE_BYTES: - return None, BadRequestException( - f"File size exceeds maximum allowed size ({MAX_FILE_SIZE_BYTES / 1024 / 1024}MB)" + return None, InvalidRequestError( + f"File size exceeds maximum allowed size ({MAX_FILE_SIZE_BYTES / 1024 / 1024}MB)", + code="FILE_TOO_LARGE", + data={"filename": filename, "max_size_bytes": MAX_FILE_SIZE_BYTES}, ) safe_filename = sanitize_filename(filename) try: _validate_file_type(safe_filename, content_type) - except BadRequestException as e: + except InvalidRequestError as e: return None, e try: _validate_file_content(safe_filename, content) - except BadRequestException as e: + except InvalidRequestError as e: return None, e return safe_filename, None @@ -264,7 +278,11 @@ async def upload_file( await asyncio.to_thread(handle.adapter.mkdir, CONTAINER_UPLOADS_PATH) result = await asyncio.to_thread(handle.adapter.write_overwrite, container_path, content) if getattr(result, "error", None): - raise InternalServerException(f"Failed to write file: {result.error}") + raise InternalServiceError( + "Failed to write file", + code="FILE_WRITE_FAILED", + data={"filename": safe_filename, "error": str(result.error)}, + ) logger.info( f"File uploaded to sandbox: user={current_user.id}, " @@ -282,14 +300,18 @@ async def upload_file( message=f"File {safe_filename} has been uploaded to your working directory", ), ) - except AppException: + except AppError: raise except Exception as e: logger.error( f"Failed to upload file: user={current_user.id}, filename={original_filename}, ip={client_ip}, error={e}", exc_info=True, ) - raise InternalServerException("Failed to upload file, please try again later") from e + raise InternalServiceError( + "Failed to upload file, please try again later", + code="FILE_UPLOAD_FAILED", + data={"filename": original_filename}, + ) from e @router.get( @@ -326,20 +348,30 @@ async def list_files(current_user: CurrentUser) -> BaseResponse[FileListResponse ) except Exception as e: logger.error(f"Failed to list files: {e}", exc_info=True) - raise InternalServerException("Failed to list files, please try again later") from e + raise InternalServiceError( + "Failed to list files, please try again later", + code="FILE_LIST_FAILED", + data={"user_id": str(current_user.id)}, + ) from e @router.get( "/read/{filename}", - response_model=BaseResponse[dict], summary="Read file content", - description="Read the content of a file in the user's sandbox upload directory.", + description="Read the content of a file in the user's sandbox upload directory. 
" + "Use mode=raw to get raw binary content with correct Content-Type header.", + response_model=None, responses={ 404: {"description": "File not found"}, 500: {"description": "Failed to read file"}, }, ) -async def read_file(request: Request, filename: str, current_user: CurrentUser) -> BaseResponse[dict]: +async def read_file( + request: Request, + filename: str, + current_user: CurrentUser, + mode: str = Query("json", description="'json' (default) or 'raw'"), +) -> BaseResponse[dict] | Response: """Read file content from the user's sandbox via adapter API.""" client_ip = get_client_ip(request) @@ -347,11 +379,41 @@ async def read_file(request: Request, filename: str, current_user: CurrentUser) safe_filename = sanitize_filename(filename) container_path = get_container_path(safe_filename) + if mode == "raw": + # Raw mode: return binary bytes with correct Content-Type header + async with await _get_sandbox_handle(str(current_user.id)) as handle: + content = await asyncio.to_thread(handle.adapter.raw_read, container_path) + + if content.startswith("[Error:") or content.startswith("Error:"): + raise NotFoundError( + "File not found", + code="FILE_NOT_FOUND", + data={"filename": safe_filename}, + ) + + # raw_read returns text; encode back to bytes via latin-1 to preserve binary data + content_bytes = content.encode("latin-1") + + mime, _ = mimetypes_module.guess_type(safe_filename) + + logger.info(f"File read (raw): user={current_user.id}, filename={safe_filename}, ip={client_ip}") + + return Response( + content=content_bytes, + media_type=mime or "application/octet-stream", + headers={"Content-Disposition": f'inline; filename="{safe_filename}"'}, + ) + + # Default JSON mode async with await _get_sandbox_handle(str(current_user.id)) as handle: content = await asyncio.to_thread(handle.adapter.raw_read, container_path) if content.startswith("[Error:") or content.startswith("Error:"): - raise NotFoundException("File not found") + raise NotFoundError( + "File not found", + code="FILE_NOT_FOUND", + data={"filename": safe_filename}, + ) # raw_read returns text; for binary files it may be garbled is_binary = False @@ -369,14 +431,18 @@ async def read_file(request: Request, filename: str, current_user: CurrentUser) msg="Read file successfully", data={"filename": safe_filename, "content": content, "is_binary": is_binary}, ) - except AppException: + except AppError: raise except Exception as e: logger.error( f"Failed to read file: user={current_user.id}, filename={filename}, ip={client_ip}, error={e}", exc_info=True, ) - raise InternalServerException("Failed to read file, please try again later") from e + raise InternalServiceError( + "Failed to read file, please try again later", + code="FILE_READ_FAILED", + data={"filename": filename}, + ) from e @router.delete( @@ -401,7 +467,11 @@ async def delete_file(request: Request, filename: str, current_user: CurrentUser ok = await asyncio.to_thread(handle.adapter.delete, container_path) if not ok: - raise NotFoundException(f"File not found: {filename}") + raise NotFoundError( + "File not found", + code="FILE_NOT_FOUND", + data={"filename": safe_filename}, + ) logger.info(f"File deleted: user={current_user.id}, filename={safe_filename}, ip={client_ip}") @@ -411,14 +481,18 @@ async def delete_file(request: Request, filename: str, current_user: CurrentUser msg="File deleted successfully", data={"filename": safe_filename, "message": f"File {safe_filename} has been deleted"}, ) - except AppException: + except AppError: raise except Exception as e: logger.error( 
f"Failed to delete file: user={current_user.id}, filename={filename}, ip={client_ip}, error={e}", exc_info=True, ) - raise InternalServerException("Failed to delete file, please try again later") from e + raise InternalServiceError( + "Failed to delete file, please try again later", + code="FILE_DELETE_FAILED", + data={"filename": filename}, + ) from e @router.delete( @@ -448,8 +522,12 @@ async def clear_all_files(request: Request, current_user: CurrentUser) -> BaseRe msg="Cleared files successfully", data={"message": "Cleared working directory"}, ) - except AppException: + except AppError: raise except Exception as e: logger.error(f"Failed to clear files: user={current_user.id}, ip={client_ip}, error={e}", exc_info=True) - raise InternalServerException("Failed to clear files, please try again later") from e + raise InternalServiceError( + "Failed to clear files, please try again later", + code="FILE_CLEAR_FAILED", + data={"user_id": str(current_user.id)}, + ) from e diff --git a/backend/app/api/v1/graph_code.py b/backend/app/api/v1/graph_code.py deleted file mode 100644 index 9500b9af8..000000000 --- a/backend/app/api/v1/graph_code.py +++ /dev/null @@ -1,190 +0,0 @@ -"""Graph Code API — save and run user LangGraph code. - -Routes are nested under ``/api/v1/graphs`` and add code-specific -operations as sub-resources of an existing graph: - -- ``POST /api/v1/graphs/{graph_id}/code/save`` — persist code -- ``POST /api/v1/graphs/{graph_id}/code/run`` — execute code and return result -""" - -import asyncio -import re -import uuid -from typing import Any, Dict, Optional - -from fastapi import APIRouter, Depends -from loguru import logger -from pydantic import BaseModel, Field -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import get_current_user -from app.common.exceptions import NotFoundException -from app.core.code_executor import execute_code -from app.core.database import get_db -from app.models.auth import AuthUser as User -from app.models.workspace import WorkspaceMemberRole -from app.services.graph_service import GraphService - -router = APIRouter(prefix="/v1/graphs", tags=["Graph Code"]) - -# Execution timeout for ainvoke (seconds) -RUN_TIMEOUT = 30.0 - - -# --------------------------------------------------------------------------- -# Request / response models -# --------------------------------------------------------------------------- - - -class CodeSaveRequest(BaseModel): - code: str = Field(..., description="Python code to save") - name: Optional[str] = Field(default=None, description="Optional graph name update") - - -class CodeRunRequest(BaseModel): - input: Optional[Dict[str, Any]] = Field( - default=None, - description="Initial state input for the graph", - ) - - -# --------------------------------------------------------------------------- -# Helpers -# --------------------------------------------------------------------------- - - -def _sanitize_error(msg: str) -> str: - """Remove server file paths from error messages.""" - return re.sub(r"/[^\s\"']+/", "/", msg) - - -# --------------------------------------------------------------------------- -# Endpoints -# --------------------------------------------------------------------------- - - -@router.post("/{graph_id}/code/save") -async def save_code( - graph_id: uuid.UUID, - payload: CodeSaveRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """Persist user code to ``graph.variables``. - - Requires member (write) permission. 
- """ - service = GraphService(db) - graph = await service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException(f"Graph {graph_id} not found") - - # Permission check: need member role to save - await service._ensure_access(graph, current_user, WorkspaceMemberRole.member) - - variables = dict(graph.variables or {}) - variables["graph_mode"] = "code" - variables["code_content"] = payload.code - graph.variables = variables - - if payload.name is not None: - graph.name = payload.name - - await db.commit() - - logger.info( - f"[GraphCodeAPI] Saved code | graph_id={graph_id} | code_len={len(payload.code)} | user={current_user.id}" - ) - return {"success": True} - - -@router.post("/{graph_id}/code/run") -async def run_code( - graph_id: uuid.UUID, - payload: CodeRunRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """Execute user code: exec → StateGraph → compile → invoke. - - Requires viewer permission. Execution has a 30s timeout. - """ - service = GraphService(db) - graph = await service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException(f"Graph {graph_id} not found") - - # Permission check: need viewer role to run - await service._ensure_access(graph, current_user, WorkspaceMemberRole.viewer) - - code = (graph.variables or {}).get("code_content", "") - if not code.strip(): - return { - "success": False, - "message": "No code to execute. Save your code first.", - } - - try: - # Step 1: exec user code → get StateGraph (has its own 10s timeout) - state_graph = execute_code(code) - - # Step 2: compile - compiled = state_graph.compile() - - # Step 3: invoke with timeout - initial_state = payload.input or {} - result = await asyncio.wait_for( - compiled.ainvoke(initial_state), # type: ignore[arg-type] - timeout=RUN_TIMEOUT, - ) - - logger.info(f"[GraphCodeAPI] Code run success | graph_id={graph_id}") - return { - "success": True, - "data": { - "result": _serialize_result(result), - }, - } - - except SyntaxError as e: - return { - "success": False, - "message": f"Syntax error at line {e.lineno}: {e.msg}", - } - except ImportError as e: - return { - "success": False, - "message": str(e), - } - except TimeoutError: - return { - "success": False, - "message": "Execution timed out. 
Check for infinite loops or long-running operations.", - } - except ValueError as e: - return { - "success": False, - "message": str(e), - } - except Exception as e: - logger.error(f"[GraphCodeAPI] Code run failed | graph_id={graph_id} | error={e}") - return { - "success": False, - "message": _sanitize_error(f"Runtime error: {type(e).__name__}: {e}"), - } - - -def _serialize_result(result: Any) -> Any: - """Best-effort serialization of graph execution result.""" - if result is None: - return None - if isinstance(result, dict): - return {k: _serialize_result(v) for k, v in result.items()} - if isinstance(result, (list, tuple)): - return [_serialize_result(item) for item in result] - if isinstance(result, (str, int, float, bool)): - return result - try: - return str(result) - except Exception: - return f"<{type(result).__name__}>" diff --git a/backend/app/api/v1/graph_deployments.py b/backend/app/api/v1/graph_deployments.py deleted file mode 100644 index 6d0e1c797..000000000 --- a/backend/app/api/v1/graph_deployments.py +++ /dev/null @@ -1,205 +0,0 @@ -""" -Graph deployment version API -""" - -import uuid -from typing import Any, Dict, Optional - -from fastapi import APIRouter, Depends, Request -from loguru import logger -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import get_current_user -from app.core.database import get_db -from app.models.auth import AuthUser as User -from app.schemas.graph_deployment_version import ( - GraphDeploymentVersionListResponse, - GraphDeploymentVersionResponseCamel, - GraphDeploymentVersionStateResponse, - GraphDeployRequest, - GraphRenameVersionRequest, - GraphRevertResponse, -) -from app.services.graph_deployment_version_service import GraphDeploymentVersionService - -router = APIRouter(prefix="/v1/graphs", tags=["Graph Deployments"]) - - -def _bind_log(request: Request, **kwargs): - trace_id = getattr(request.state, "trace_id", "-") - return logger.bind(trace_id=trace_id, **kwargs) - - -@router.get("/{graph_id}/deploy", response_model=Dict[str, Any]) -async def get_deployment_status( - graph_id: uuid.UUID, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Get deployment status.""" - log = _bind_log(request, graph_id=str(graph_id)) - log.info("Getting deployment status for graph: {}", graph_id) - - service = GraphDeploymentVersionService(db) - return await service.get_deployment_status(graph_id, current_user) - - -@router.post("/{graph_id}/deploy", response_model=Dict[str, Any]) -async def deploy_graph( - graph_id: uuid.UUID, - request: Request, - body: Optional[GraphDeployRequest] = None, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Deploy a graph.""" - log = _bind_log(request, graph_id=str(graph_id)) - log.info("Deploying graph: {}", graph_id) - - service = GraphDeploymentVersionService(db) - name = body.name if body else None - - result = await service.deploy(graph_id, current_user, name) - - return { - "success": result.success, - "message": result.message, - "version": result.version, - "isActive": result.isActive, - "needsRedeployment": result.needsRedeployment, - } - - -@router.delete("/{graph_id}/deploy", response_model=Dict[str, Any]) -async def undeploy_graph( - graph_id: uuid.UUID, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Undeploy a graph.""" - log = _bind_log(request, graph_id=str(graph_id)) - log.info("Undeploying 
graph: {}", graph_id) - - service = GraphDeploymentVersionService(db) - return await service.undeploy(graph_id, current_user) - - -@router.get("/{graph_id}/deployments", response_model=GraphDeploymentVersionListResponse) -async def list_deployment_versions( - graph_id: uuid.UUID, - request: Request, - page: int = 1, - page_size: int = 10, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """List all deployment versions (paginated).""" - log = _bind_log(request, graph_id=str(graph_id)) - log.info("Listing deployment versions for graph: {} (page={}, page_size={})", graph_id, page, page_size) - - service = GraphDeploymentVersionService(db) - return await service.list_versions(graph_id, current_user, page=page, page_size=page_size) - - -@router.get("/{graph_id}/deployments/{version}", response_model=GraphDeploymentVersionResponseCamel) -async def get_deployment_version( - graph_id: uuid.UUID, - version: int, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Get a specific deployment version.""" - log = _bind_log(request, graph_id=str(graph_id), version=version) - log.info("Getting deployment version: {} v{}", graph_id, version) - - service = GraphDeploymentVersionService(db) - return await service.get_version(graph_id, version, current_user) - - -@router.get("/{graph_id}/deployments/{version}/state", response_model=GraphDeploymentVersionStateResponse) -async def get_deployment_version_state( - graph_id: uuid.UUID, - version: int, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Get a specific deployment version's full state (for preview).""" - log = _bind_log(request, graph_id=str(graph_id), version=version) - log.info("Getting deployment version state: {} v{}", graph_id, version) - - service = GraphDeploymentVersionService(db) - return await service.get_version_state(graph_id, version, current_user) - - -@router.patch("/{graph_id}/deployments/{version}", response_model=GraphDeploymentVersionResponseCamel) -async def rename_deployment_version( - graph_id: uuid.UUID, - version: int, - body: GraphRenameVersionRequest, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Rename a deployment version.""" - log = _bind_log(request, graph_id=str(graph_id), version=version) - log.info("Renaming deployment version: {} v{} to '{}'", graph_id, version, body.name) - - service = GraphDeploymentVersionService(db) - return await service.rename_version(graph_id, version, body.name, current_user) - - -@router.post("/{graph_id}/deployments/{version}/activate", response_model=Dict[str, Any]) -async def activate_deployment_version( - graph_id: uuid.UUID, - version: int, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Activate a deployment version.""" - log = _bind_log(request, graph_id=str(graph_id), version=version) - log.info("Activating deployment version: {} v{}", graph_id, version) - - service = GraphDeploymentVersionService(db) - activated = await service.activate_version(graph_id, version, current_user) - - return { - "success": True, - "deployedAt": activated.createdAt, - } - - -@router.post("/{graph_id}/deployments/{version}/revert", response_model=GraphRevertResponse) -async def revert_to_deployment_version( - graph_id: uuid.UUID, - version: int, - request: Request, - current_user: User = 
Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Revert to a specific deployment version.""" - log = _bind_log(request, graph_id=str(graph_id), version=version) - log.info("Reverting to deployment version: {} v{}", graph_id, version) - - service = GraphDeploymentVersionService(db) - return await service.revert_to_version(graph_id, version, current_user) - - -@router.delete("/{graph_id}/deployments/{version}", response_model=Dict[str, Any]) -async def delete_deployment_version( - graph_id: uuid.UUID, - version: int, - request: Request, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - """Delete a deployment version.""" - log = _bind_log(request, graph_id=str(graph_id), version=version) - log.info("Deleting deployment version: {} v{}", graph_id, version) - - service = GraphDeploymentVersionService(db) - return await service.delete_version(graph_id, version, current_user) diff --git a/backend/app/api/v1/graphs.py b/backend/app/api/v1/graphs.py deleted file mode 100644 index 215538b04..000000000 --- a/backend/app/api/v1/graphs.py +++ /dev/null @@ -1,639 +0,0 @@ -""" -Graph API (path: /api/v1/graphs) -""" - -import time -import uuid -from typing import Any, Dict, List, Optional - -from fastapi import APIRouter, Depends, Query, Request -from loguru import logger -from pydantic import BaseModel, Field -from sqlalchemy import func, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import get_current_user -from app.common.exceptions import ForbiddenException, NotFoundException -from app.core.database import get_db -from app.models.auth import AuthUser as User -from app.models.graph import AgentGraph, GraphNode -from app.models.workspace import WorkspaceMemberRole -from app.repositories.agent_run import AgentRunRepository -from app.repositories.workspace import WorkspaceRepository -from app.services.graph_service import GraphService - -router = APIRouter(prefix="/v1/graphs", tags=["Graphs"]) - - -def _bind_log(request: Request, **kwargs): - trace_id = getattr(request.state, "trace_id", "-") - return logger.bind(trace_id=trace_id, **kwargs) - - -class GraphStatePayload(BaseModel): - """Graph state payload.""" - - nodes: List[Dict[str, Any]] = Field(default_factory=list, description="Node list") - edges: List[Dict[str, Any]] = Field(default_factory=list, description="Edge list") - viewport: Optional[Dict[str, Any]] = Field(default=None, description="Viewport info") - variables: Optional[Dict[str, Any]] = Field(default=None, description="Graph variables (e.g. 
context variables)") - # optional graph creation params (for upsert mode) - name: Optional[str] = Field(default=None, max_length=200, description="Graph name (for creating a new graph)") - workspaceId: Optional[uuid.UUID] = Field(default=None, description="Workspace ID (for creating a new graph)") - - -class CreateGraphRequest(BaseModel): - """Create graph request.""" - - name: str = Field(..., min_length=1, max_length=200, description="Graph name") - description: Optional[str] = Field(default=None, max_length=2000, description="Graph description") - color: Optional[str] = Field(default=None, max_length=2000, description="Color") - workspaceId: Optional[uuid.UUID] = Field(default=None, description="Workspace ID") - folderId: Optional[uuid.UUID] = Field(default=None, description="Folder ID") - parentId: Optional[uuid.UUID] = Field(default=None, description="Parent graph ID") - variables: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Variables") - - -class UpdateGraphRequest(BaseModel): - """Update graph request.""" - - name: Optional[str] = Field(default=None, min_length=1, max_length=200, description="Graph name") - description: Optional[str] = Field(default=None, max_length=2000, description="Graph description") - color: Optional[str] = Field(default=None, max_length=2000, description="Color") - folderId: Optional[uuid.UUID] = Field(default=None, description="Folder ID") - parentId: Optional[uuid.UUID] = Field(default=None, description="Parent graph ID") - isDeployed: Optional[bool] = Field(default=None, description="Whether deployed") - - -async def _ensure_workspace_member( - *, - db: AsyncSession, - workspace_id: uuid.UUID, - current_user: User, - min_role: WorkspaceMemberRole, -) -> None: - """ - Ensure the user is a workspace member with sufficient permissions. - - Args: - db: database session - workspace_id: workspace ID - current_user: current user - min_role: minimum required role - - Raises: - NotFoundException: if the workspace does not exist - ForbiddenException: if the user is not a member or lacks permission - """ - from app.services.workspace_permission import check_workspace_access - - # check workspace existence - workspace_repo = WorkspaceRepository(db) - workspace = await workspace_repo.get(workspace_id) - if not workspace: - raise NotFoundException("Workspace not found") - - # check access permission - has_access = await check_workspace_access(db, workspace_id, current_user, min_role) - if not has_access: - raise ForbiddenException("No access to workspace or insufficient permission") - - -def _serialize_graph_row(graph: AgentGraph, node_count: int = 0) -> Dict[str, Any]: - """ - Serialize a graph object to a dict. 
- - Args: - graph: graph object - node_count: node count (optional) - - Returns: - Serialized dict - """ - return { - "id": str(graph.id), - "userId": str(graph.user_id), - "workspaceId": str(graph.workspace_id) if graph.workspace_id else None, - "folderId": str(graph.folder_id) if graph.folder_id else None, - "parentId": str(graph.parent_id) if graph.parent_id else None, - "name": graph.name, - "description": graph.description, - "color": graph.color, - "isDeployed": graph.is_deployed, - "variables": graph.variables or {}, - "createdAt": graph.created_at.isoformat() if graph.created_at else None, - "updatedAt": graph.updated_at.isoformat() if graph.updated_at else None, - "nodeCount": node_count, - } - - -@router.get("") -async def list_graphs( - request: Request, - workspace_id: Optional[uuid.UUID] = Query(default=None, alias="workspaceId"), - parent_id: Optional[uuid.UUID] = Query(default=None, alias="parentId"), - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """ - List graphs. - - Filtering logic: - - Default (no workspace_id): list all graphs owned by the current user (personal graphs) - - If workspace_id is provided: - - Check that the user has access to the workspace (at least viewer) - - If authorized, return all graphs in the workspace (not limited to user-created ones) - - If unauthorized, return an empty list - - If parentId is provided: list sub-graphs under the specified parent graph - """ - service = GraphService(db) - - log = _bind_log(request, user_id=str(current_user.id)) - - log.info(f"graph.list start workspace_id={workspace_id} parent_id={parent_id}") - - # if workspace_id is provided, check permission and return all workspace graphs - if workspace_id: - # check that the user has access (at least viewer) - await _ensure_workspace_member( - db=db, - workspace_id=workspace_id, - current_user=current_user, - min_role=WorkspaceMemberRole.viewer, - ) - # return all graphs in the workspace (not limited to user) - query = select(AgentGraph).where( - AgentGraph.workspace_id == workspace_id, - AgentGraph.deleted_at.is_(None), - ) - if parent_id is not None: - query = query.where(AgentGraph.parent_id == parent_id) - query = query.order_by(AgentGraph.created_at.desc(), AgentGraph.id.desc()) - result = await db.execute(query) - graphs = list(result.scalars().all()) - else: - # no workspace_id — return user-owned graphs (personal graphs) - graphs = await service.graph_repo.list_by_user_with_filters( - user_id=current_user.id, - parent_id=parent_id, - workspace_id=None, - ) - - # batch-query node counts for each graph - graph_ids = [graph.id for graph in graphs] - node_counts: Dict[Any, int] = {} - if graph_ids: - # use GROUP BY to query all node counts in one shot - count_query = ( - select(GraphNode.graph_id, func.count(GraphNode.id).label("count")) - .where(GraphNode.graph_id.in_(graph_ids)) - .group_by(GraphNode.graph_id) - ) - result = await db.execute(count_query) - for row in result: - count_val = getattr(row, "count", 0) - node_counts[row.graph_id] = int(count_val) if not callable(count_val) else count_val() # type: ignore[call-overload] - - log.info(f"graph.list success count={len(graphs)}") - return {"data": [_serialize_graph_row(graph, node_counts.get(graph.id, 0)) for graph in graphs]} - - -@router.get("/deployed") -async def list_deployed_graphs( - request: Request, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """ - List deployed graphs accessible to the current user. 
- - Includes: - 1. Deployed graphs created by the user - 2. Deployed graphs in workspaces the user has access to (at least viewer) - - Filters: - - is_deployed = True - - User is the graph owner, or user has workspace access - """ - from sqlalchemy import or_ - - from app.models.workspace import WorkspaceMemberRole - from app.repositories.workspace import WorkspaceRepository - from app.services.workspace_permission import check_workspace_access - - log = _bind_log(request, user_id=str(current_user.id)) - log.info("graph.list_deployed start") - - workspace_repo = WorkspaceRepository(db) - user_workspaces = await workspace_repo.list_for_user(current_user.id) - accessible_workspace_ids = [ws.id for ws in user_workspaces] - - conditions = [AgentGraph.is_deployed, AgentGraph.deleted_at.is_(None)] - - user_owned_condition = AgentGraph.user_id == str(current_user.id) - - if accessible_workspace_ids: - workspace_condition = AgentGraph.workspace_id.in_(accessible_workspace_ids) - graph_condition = or_(user_owned_condition, workspace_condition) - else: - graph_condition = user_owned_condition - - conditions.append(graph_condition) # type: ignore[arg-type] - - query = select(AgentGraph).where(*conditions).order_by(AgentGraph.created_at.desc()) - result = await db.execute(query) - all_graphs = list(result.scalars().all()) - - filtered_graphs = [] - for graph in all_graphs: - if graph.user_id == str(current_user.id): - filtered_graphs.append(graph) - elif graph.workspace_id: - has_access = await check_workspace_access( - db, - graph.workspace_id, - current_user, - WorkspaceMemberRole.viewer, - ) - if has_access: - filtered_graphs.append(graph) - - graphs = filtered_graphs - - graph_ids = [graph.id for graph in graphs] - node_counts: Dict[Any, int] = {} - if graph_ids: - count_query = ( - select(GraphNode.graph_id, func.count(GraphNode.id).label("count")) - .where(GraphNode.graph_id.in_(graph_ids)) - .group_by(GraphNode.graph_id) - ) - result = await db.execute(count_query) - for row in result: - count_val = getattr(row, "count", 0) - node_counts[row.graph_id] = int(count_val) if not callable(count_val) else count_val() # type: ignore[call-overload] - - log.info(f"graph.list_deployed success count={len(graphs)}") - return {"data": [_serialize_graph_row(graph, node_counts.get(graph.id, 0)) for graph in graphs]} - - -@router.post("") -async def create_graph( - request: Request, - payload: CreateGraphRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """ - Create a new graph. 
- - - personal graph: created by the current user - - workspace graph: requires workspace write permission (member+) - """ - log = _bind_log(request, user_id=str(current_user.id)) - parent_id = payload.parentId - workspace_id = payload.workspaceId - - # workspace_id may be None (personal graph) - - if workspace_id: - await _ensure_workspace_member( - db=db, - workspace_id=workspace_id, - current_user=current_user, - min_role=WorkspaceMemberRole.member, - ) - - service = GraphService(db) - graph = await service.create_graph( - name=payload.name.strip(), - user_id=current_user.id, - workspace_id=workspace_id, - folder_id=payload.folderId, - parent_id=parent_id, - description=payload.description.strip() if payload.description else None, - color=payload.color, - variables=payload.variables, - ) - await db.commit() - log.info(f"graph.create success graph_id={graph.id} workspace_id={workspace_id} parent_id={parent_id}") - return {"data": _serialize_graph_row(graph)} - - -@router.get("/{graph_id}") -async def get_graph( - graph_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """ - Get graph details including nodes and edges. - - Returns: - { - "data": { - "id": "...", - "name": "...", - "nodes": [...], - "edges": [...], - "viewport": {...}, - ... - } - } - """ - service = GraphService(db) - data = await service.get_graph_detail(graph_id, current_user) - return {"data": data} - - -@router.put("/{graph_id}") -async def update_graph( - graph_id: uuid.UUID, - payload: UpdateGraphRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """Update graph metadata (name/description/color/folderId/parentId/isDeployed).""" - service = GraphService(db) - graph = await service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - # permission check - await service._ensure_access(graph, current_user, WorkspaceMemberRole.member) - - update_data: Dict[str, Any] = {} - fields_set: set[str] = getattr(payload, "model_fields_set", set()) - - if payload.name is not None: - update_data["name"] = payload.name.strip() - if "description" in fields_set: - update_data["description"] = payload.description - if payload.color is not None: - update_data["color"] = payload.color - if "folderId" in fields_set: - # if folderId is provided, verify it exists and belongs to the current workspace - if payload.folderId is not None: - from app.repositories.workspace_folder import WorkflowFolderRepository - - folder_repo = WorkflowFolderRepository(db) - folder = await folder_repo.get(payload.folderId) - if not folder: - raise NotFoundException(f"Folder with id {payload.folderId} not found") - # ensure folder belongs to the graph's workspace - if graph.workspace_id and folder.workspace_id != graph.workspace_id: - from app.common.exceptions import BadRequestException - - raise BadRequestException( - f"Folder {payload.folderId} does not belong to workspace {graph.workspace_id}" - ) - # allow setting to None to clear the folder association - update_data["folder_id"] = payload.folderId - if "parentId" in fields_set: - # if parentId is provided, verify it exists (allow None to clear the parent relationship) - if payload.parentId is not None: - parent_graph = await service.graph_repo.get(payload.parentId) - if not parent_graph: - raise NotFoundException(f"Parent graph with id {payload.parentId} not found") - # allow setting to None to clear the parent graph relationship - update_data["parent_id"] = 
payload.parentId - if payload.isDeployed is not None: - update_data["is_deployed"] = payload.isDeployed - - if update_data: - await service.graph_repo.update(graph_id, update_data) - await db.commit() - - graph2 = await service.graph_repo.get(graph_id) - if not graph2: - raise NotFoundException("Graph not found") - return {"data": _serialize_graph_row(graph2)} - - -@router.delete("/{graph_id}") -async def delete_graph( - graph_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """Delete a graph (requires member permission, i.e. write access).""" - service = GraphService(db) - graph = await service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - # permission check: - # - personal graph: only the owner can delete - # - workspace graph: requires at least member permission (write access) - if graph.workspace_id: - # workspace graph: requires member permission to delete - await service._ensure_access( - graph, - current_user, - required_role=WorkspaceMemberRole.member, - ) - else: - # personal graph: only the owner can delete - if graph.user_id != current_user.id: - raise ForbiddenException("Only graph owner can delete personal graph") - - await service.graph_repo.soft_delete(graph_id) - await db.commit() - return {"success": True} - - -@router.get("/{graph_id}/state") -async def load_graph_state( - graph_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """ - Load graph state (nodes and edges). - - Returns: - { - "success": true, - "data": { - "nodes": [...], - "edges": [...], - "viewport": {...} - } - } - """ - service = GraphService(db) - state = await service.load_graph_state(graph_id, current_user) - return {"success": True, "data": state} - - -@router.post("/{graph_id}/state") -async def save_graph_state( - request: Request, - graph_id: uuid.UUID, - payload: GraphStatePayload, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """ - Save graph state (nodes and edges) — supports upsert mode. - - If the graph does not exist, automatically create a new one (requires name parameter). 
- - Accepts frontend format: - { - "nodes": [...], - "edges": [...], - "viewport": {...}, - "name": "optional, for creating a new graph", - "workspaceId": "optional, for creating a new graph" - } - """ - log = _bind_log(request, user_id=str(current_user.id), graph_id=str(graph_id)) - service = GraphService(db) - - # workspace_id may be None (personal graph) - workspace_id = payload.workspaceId - - result = await service.save_graph_state( - graph_id=graph_id, - nodes=payload.nodes, - edges=payload.edges, - viewport=payload.viewport, - variables=payload.variables, - current_user=current_user, - # upsert parameters - name=payload.name, - workspace_id=workspace_id, - ) - - # explicitly commit the transaction to persist data - # note: get_db() does not auto-commit; an explicit commit() is required - await db.commit() - - log.info(f"graph.state.save success nodes={len(payload.nodes)} edges={len(payload.edges)}") - return {"success": True, **result} - - -@router.put("/{graph_id}/state") -async def save_graph_state_put( - request: Request, - graph_id: uuid.UUID, - payload: GraphStatePayload, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """PUT alias for saving graph state.""" - return await save_graph_state(request, graph_id, payload, db, current_user) - - -# ==================== Compile (pre-build + cache warm) ==================== - - -@router.post("/{graph_id}/compile") -async def compile_graph( - request: Request, - graph_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -) -> Dict[str, Any]: - """Compile a graph and warm the cache; return build time. Execution uses the cache if not expired.""" - service = GraphService(db) - graph = await service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - await service._ensure_access(graph, current_user, WorkspaceMemberRole.viewer) - start = time.time() - try: - await service.create_graph_by_graph_id( - graph_id=graph_id, - user_id=current_user.id, - current_user=current_user, - ) - build_time_ms = (time.time() - start) * 1000 - return {"ok": True, "build_time_ms": round(build_time_ms, 2)} - except Exception as e: - log = _bind_log(request, user_id=str(current_user.id), graph_id=str(graph_id)) - log.error(f"graph.compile failed: {e}") - raise - - -# ==================== Copilot Endpoints ==================== - - -@router.get("/{graph_id}/copilot/history") -async def get_copilot_history( - request: Request, - graph_id: uuid.UUID, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - log = _bind_log(request, user_id=str(current_user.id), graph_id=str(graph_id)) - log.info("copilot.history.get start") - - graph_service = GraphService(db) - graph = await graph_service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - await graph_service._ensure_access(graph, current_user, WorkspaceMemberRole.viewer) - - repo = AgentRunRepository(db) - runs = await repo.list_recent_runs_for_user( - user_id=str(current_user.id), - agent_name="copilot", - graph_id=graph_id, - limit=100, - ) - - messages = [] - for run in reversed(list(runs)): # oldest first - snapshot = await repo.get_snapshot(run.id) - if not snapshot or not snapshot.projection: - continue - p = snapshot.projection - messages.append( - { - "role": "user", - "content": run.title or "", - "created_at": run.created_at.isoformat() if run.created_at else None, - } - ) - messages.append( - { - "role": 
"assistant", - "content": p.get("result_message") or p.get("content", ""), - "created_at": run.updated_at.isoformat() if run.updated_at else None, - "actions": p.get("result_actions", []), - "thought_steps": p.get("thought_steps", []), - "tool_calls": p.get("tool_calls", []), - } - ) - - log.info(f"copilot.history.get success messages_count={len(messages)}") - return {"data": {"graph_id": str(graph_id), "messages": messages}} - - -@router.delete("/{graph_id}/copilot/history") -async def clear_copilot_history( - request: Request, - graph_id: uuid.UUID, - current_user: User = Depends(get_current_user), - db: AsyncSession = Depends(get_db), -): - log = _bind_log(request, user_id=str(current_user.id), graph_id=str(graph_id)) - log.info("copilot.history.clear start") - - graph_service = GraphService(db) - graph = await graph_service.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - await graph_service._ensure_access(graph, current_user, WorkspaceMemberRole.member) - - repo = AgentRunRepository(db) - deleted = await repo.delete_runs_for_graph( - user_id=str(current_user.id), - agent_name="copilot", - graph_id=graph_id, - ) - - log.info(f"copilot.history.clear success deleted={deleted}") - return {"success": True} diff --git a/backend/app/api/v1/mcp.py b/backend/app/api/v1/mcp.py index cba65c8e1..1e3fe3b88 100644 --- a/backend/app/api/v1/mcp.py +++ b/backend/app/api/v1/mcp.py @@ -14,8 +14,8 @@ from pydantic import BaseModel, Field from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError, NotFoundError from app.common.dependencies import get_current_user -from app.common.exceptions import BadRequestException, NotFoundException from app.core.database import get_db from app.models.auth import AuthUser as User from app.services.mcp_client_service import McpConnectionConfig, get_mcp_client @@ -387,8 +387,10 @@ async def execute_tool( if not tool: # get_mcp_tool_with_instance already logs detailed warnings - raise NotFoundException( - f"MCP tool '{request.toolName}' not found on server '{request.serverName}' or server is not accessible" + raise NotFoundError( + f"MCP tool '{request.toolName}' not found on server '{request.serverName}' or server is not accessible", + code="MCP_TOOL_NOT_FOUND", + data={"server_name": request.serverName, "tool_name": request.toolName}, ) try: @@ -405,4 +407,8 @@ async def execute_tool( f"toolName={request.toolName}, error={str(e)}", exc_info=True, ) - raise BadRequestException(f"Tool execution failed: {str(e)}") + raise InvalidRequestError( + f"Tool execution failed: {str(e)}", + code="MCP_TOOL_EXECUTION_FAILED", + data={"server_name": request.serverName, "tool_name": request.toolName}, + ) diff --git a/backend/app/api/v1/memory/memory.py b/backend/app/api/v1/memory/memory.py index 8b62aa705..d048913a6 100644 --- a/backend/app/api/v1/memory/memory.py +++ b/backend/app/api/v1/memory/memory.py @@ -24,14 +24,14 @@ UserMemoryCreateSchema, UserMemorySchema, ) -from app.common.dependencies import get_current_user -from app.common.exceptions import ( - AppException, - BadRequestException, - InternalServerException, - NotFoundException, - ValidationException, +from app.common.app_errors import ( + AppError, + InternalServiceError, + InvalidRequestError, + NotFoundError, + RequestValidationAppError, ) +from app.common.dependencies import get_current_user from app.core.database import get_db from app.models.auth import AuthUser as User from app.schemas.memory import UserMemory @@ -87,7 +87,7 @@ def 
parse_topics( return [topic.strip() for topic in topics.split(",") if topic.strip()] except Exception as e: - raise ValidationException(f"Invalid topics format: {e}") + raise RequestValidationAppError(f"Invalid topics format: {e}") @router.post( @@ -142,7 +142,11 @@ async def create_memory( ) if not user_memory: - raise InternalServerException("Failed to create memory") + raise InternalServiceError( + "Failed to create memory", + code="MEMORY_CREATE_FAILED", + data={"user_id": payload.user_id}, + ) return UserMemorySchema.from_dict(_normalize_memory_dict(user_memory)) # type: ignore @@ -168,7 +172,11 @@ async def delete_memory( db = MemoryService(db_session) success = await db.delete_user_memory(memory_id=memory_id, user_id=str(current_user.id)) if not success: - raise NotFoundException(f"Memory with ID {memory_id} not found") + raise NotFoundError( + "Memory not found", + code="MEMORY_NOT_FOUND", + data={"memory_id": memory_id}, + ) return None @@ -193,7 +201,11 @@ async def delete_memories( current_user: User = Depends(get_current_user), ) -> None: if not request.memory_ids: - raise BadRequestException("memory_ids must not be empty") + raise InvalidRequestError( + "memory_ids must not be empty", + code="MEMORY_IDS_EMPTY", + data={"memory_ids": request.memory_ids}, + ) db = MemoryService(db_session) await db.delete_user_memories(memory_ids=request.memory_ids, user_id=str(current_user.id)) return None @@ -285,7 +297,11 @@ async def get_memory( user_memory = await db.get_user_memory(memory_id=memory_id, user_id=str(current_user.id), deserialize=False) if not user_memory: - raise NotFoundException(f"Memory with ID {memory_id} not found") + raise NotFoundError( + "Memory not found", + code="MEMORY_NOT_FOUND", + data={"memory_id": memory_id}, + ) return UserMemorySchema.from_dict(_normalize_memory_dict(user_memory)) # type: ignore @@ -351,7 +367,11 @@ async def update_memory( deserialize=False, ) if not user_memory: - raise InternalServerException("Failed to update memory") + raise InternalServiceError( + "Failed to update memory", + code="MEMORY_UPDATE_FAILED", + data={"memory_id": memory_id, "user_id": payload.user_id}, + ) return UserMemorySchema.from_dict(_normalize_memory_dict(user_memory)) # type: ignore @@ -387,14 +407,17 @@ async def optimize_memories( from app.services.model_service import ModelService if not request.model: - raise BadRequestException( + raise InvalidRequestError( "Model is required. Specify 'model' in format 'provider:model_name' (e.g., 'openai:gpt-4o-mini').", + code="MEMORY_OPTIMIZATION_MODEL_REQUIRED", ) provider_name, model_name = parse_model_ref(request.model) if not model_name: - raise BadRequestException( + raise InvalidRequestError( "Invalid model format. 
Specify 'model' in format 'provider:model_name' (e.g., 'openai:gpt-4o-mini').", + code="MEMORY_OPTIMIZATION_MODEL_INVALID", + data={"model": request.model}, ) model_service = ModelService(db_session) @@ -419,7 +442,11 @@ async def optimize_memories( user_id = request.user_id or str(current_user.id) memories_before = await memory_manager.aget_user_memories(user_id=user_id) if not memories_before: - raise NotFoundException(f"No memories found for user {user_id}") + raise NotFoundError( + "No memories found for user", + code="MEMORY_NOT_FOUND", + data={"user_id": user_id}, + ) # Count tokens before optimization strategy = SummarizeStrategy() @@ -469,8 +496,12 @@ async def optimize_memories( reduction_percentage=reduction_percentage, ) - except AppException: + except AppError: raise except Exception as e: logger.error(f"Failed to optimize memories for user {request.user_id}: {str(e)}") - raise InternalServerException(f"Failed to optimize memories: {str(e)}") + raise InternalServiceError( + "Failed to optimize memories", + code="MEMORY_OPTIMIZATION_FAILED", + data={"user_id": request.user_id or str(current_user.id)}, + ) from e diff --git a/backend/app/api/v1/model_providers.py b/backend/app/api/v1/model_providers.py index 1df3a48b9..1187086d4 100644 --- a/backend/app/api/v1/model_providers.py +++ b/backend/app/api/v1/model_providers.py @@ -74,9 +74,13 @@ async def get_provider( provider = await service.get_provider(provider_name) if not provider: - from app.common.exceptions import NotFoundException + from app.common.app_errors import NotFoundError - raise NotFoundException(f"Provider not found: {provider_name}") + raise NotFoundError( + "Provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"provider_name": provider_name}, + ) return success_response(data=provider, message="Provider details retrieved") diff --git a/backend/app/api/v1/models.py b/backend/app/api/v1/models.py index b2177c9e8..7111a06c4 100644 --- a/backend/app/api/v1/models.py +++ b/backend/app/api/v1/models.py @@ -10,6 +10,7 @@ from pydantic import BaseModel, Field from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError from app.common.dependencies import get_current_user from app.common.response import success_response from app.core.database import get_db @@ -67,9 +68,11 @@ async def list_available_models( try: model_type_enum = ModelType(model_type) except ValueError: - from app.common.exceptions import BadRequestException - - raise BadRequestException(f"Unsupported model type: {model_type}") + raise InvalidRequestError( + f"Unsupported model type: {model_type}", + code="MODEL_TYPE_UNSUPPORTED", + data={"model_type": model_type}, + ) service = ModelService(db) models = await service.get_available_models(model_type=model_type_enum, user_id=current_user.id) @@ -86,9 +89,11 @@ async def create_model_instance( try: model_type_enum = ModelType(payload.model_type) except ValueError: - from app.common.exceptions import BadRequestException - - raise BadRequestException(f"Unsupported model type: {payload.model_type}") + raise InvalidRequestError( + f"Unsupported model type: {payload.model_type}", + code="MODEL_TYPE_UNSUPPORTED", + data={"model_type": payload.model_type}, + ) service = ModelService(db) instance = await service.create_model_instance_config( diff --git a/backend/app/api/v1/oauth.py b/backend/app/api/v1/oauth.py index 78f4b830b..a1cbf3d76 100644 --- a/backend/app/api/v1/oauth.py +++ b/backend/app/api/v1/oauth.py @@ -15,6 +15,7 @@ import secrets from datetime import 
datetime, timedelta, timezone from typing import Any, Dict, List, Optional +from urllib.parse import urlencode from fastapi import APIRouter, Depends, Query, Request from fastapi.responses import RedirectResponse @@ -22,8 +23,9 @@ from pydantic import BaseModel from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError from app.common.dependencies import get_db -from app.common.exceptions import BadRequestException +from app.common.response import success_response from app.core.oauth import get_oauth_config, get_protocol_handler from app.core.redis import RedisClient from app.core.security import create_access_token, create_csrf_token, generate_refresh_token @@ -73,7 +75,7 @@ async def oauth_authorize( request: Request, callback_url: Optional[str] = Query(None, description="Redirect URL after successful login"), db: AsyncSession = Depends(get_db), -) -> RedirectResponse: +) -> Dict[str, Any]: """ Start OAuth authorization flow. @@ -106,19 +108,20 @@ async def oauth_authorize( except Exception as e: logger.warning(f"{LOG_PREFIX} Failed to store state in Redis: {e}") - # Generate authorization URL - try: - authorization_url, _ = await oauth_service.generate_authorization_url( - provider_name=provider, - redirect_uri=redirect_uri, - state=state, - ) - except Exception as e: - logger.error(f"{LOG_PREFIX} Failed to generate authorization URL: {e}") - raise BadRequestException(f"Failed to initiate OAuth flow: {str(e)}") + authorization_url, resolved_state = await oauth_service.generate_authorization_url( + provider_name=provider, + redirect_uri=redirect_uri, + state=state, + ) - logger.info(f"{LOG_PREFIX} Redirecting to {provider} authorization") - return RedirectResponse(url=authorization_url, status_code=302) + logger.info(f"{LOG_PREFIX} Generated authorization URL for {provider}") + return success_response( + data={ + "authorization_url": authorization_url, + "state": resolved_state, + }, + message="OAuth authorization URL generated", + ) @router.get("/{provider}/callback") @@ -146,13 +149,13 @@ async def oauth_callback( # Handle user denial if error: logger.warning(f"{LOG_PREFIX} OAuth error: {error} - {error_description}") - return _redirect_with_error(frontend_url, "oauth_denied", error_description or error) + return _redirect_with_error(frontend_url, "OAUTH_ACCESS_DENIED", error_description or error) # 2. Load provider config (needed to detect protocol) provider_config = oauth_config.get_provider(provider) if not provider_config: logger.error(f"{LOG_PREFIX} Provider not found: {provider}") - return _redirect_with_error(frontend_url, "provider_not_found") + return _redirect_with_error(frontend_url, "OAUTH_PROVIDER_NOT_FOUND") # 1. 
Validate state (JD SSO can skip; it relies on Cookie, not auth code) callback_url = oauth_config.settings.default_redirect_url @@ -162,16 +165,16 @@ async def oauth_callback( # Validate when state is present state_data, callback_url = await _validate_state(state, oauth_config) if state_data is None: - return _redirect_with_error(frontend_url, "invalid_state") + return _redirect_with_error(frontend_url, "OAUTH_STATE_INVALID") # Validate provider match if state_data.get("provider") != provider: logger.warning(f"{LOG_PREFIX} Provider mismatch: expected {state_data.get('provider')}, got {provider}") - return _redirect_with_error(frontend_url, "provider_mismatch") + return _redirect_with_error(frontend_url, "OAUTH_PROVIDER_MISMATCH") elif provider_config.protocol != "jd_sso": # Non-JD SSO protocols require state logger.warning(f"{LOG_PREFIX} Missing state parameter for {provider_config.protocol}") - return _redirect_with_error(frontend_url, "missing_state") + return _redirect_with_error(frontend_url, "OAUTH_STATE_MISSING") try: # 3. Use protocol handler to fetch user info @@ -236,17 +239,17 @@ async def oauth_callback( return response - except BadRequestException: + except InvalidRequestError: raise except ValueError as e: # Validation error raised by protocol handler logger.error(f"{LOG_PREFIX} OAuth callback validation error: {e}") await db.rollback() - return _redirect_with_error(frontend_url, "oauth_failed", str(e)) + return _redirect_with_error(frontend_url, "OAUTH_CALLBACK_INVALID", str(e)) except Exception as e: logger.error(f"{LOG_PREFIX} OAuth callback error: {e}", exc_info=True) await db.rollback() - return _redirect_with_error(frontend_url, "oauth_failed", str(e)) + return _redirect_with_error(frontend_url, "OAUTH_CALLBACK_FAILED", str(e)) # ==================== User OAuth Account Management ==================== @@ -336,11 +339,12 @@ def _get_client_ip(request: Request) -> str: return ip -def _redirect_with_error(frontend_url: str, error: str, description: Optional[str] = None) -> RedirectResponse: +def _redirect_with_error(frontend_url: str, error_code: str, message: Optional[str] = None) -> RedirectResponse: """Build error redirect response.""" - error_url = f"{frontend_url}/signin?error={error}" - if description: - error_url += f"&error_description={description}" + params = {"error_code": error_code} + if message: + params["error_message"] = message + error_url = f"{frontend_url}/signin?{urlencode(params)}" return RedirectResponse(url=error_url, status_code=302) diff --git a/backend/app/api/v1/openapi_graph.py b/backend/app/api/v1/openapi_graph.py deleted file mode 100644 index 1c2b585a2..000000000 --- a/backend/app/api/v1/openapi_graph.py +++ /dev/null @@ -1,181 +0,0 @@ -""" -OpenAPI Graph routes — trigger Graph execution via PlatformToken auth - -Endpoints: -- POST /v1/openapi/graph/{graphId}/run Start execution -- GET /v1/openapi/graph/{executionId}/status Query status -- POST /v1/openapi/graph/{executionId}/abort Abort execution -- GET /v1/openapi/graph/{executionId}/result Get result -""" - -from __future__ import annotations - -import uuid -from typing import Any, Dict, Optional - -from fastapi import APIRouter, Depends, Request -from loguru import logger -from pydantic import BaseModel, Field -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.auth_dependency import AuthContext, get_current_user_or_token -from app.common.exceptions import ForbiddenException -from app.common.permissions import check_token_permission -from app.core.database import get_db -from 
app.services.openapi_graph_service import OpenApiGraphService - -router = APIRouter(prefix="/v1/openapi/graph", tags=["OpenAPI Graph"]) - - -# ─── Request / Response Models ───────────────────────────────── - - -class RunGraphRequest(BaseModel): - """Request body for starting a Graph execution.""" - - variables: Optional[Dict[str, Any]] = Field( - default=None, - description="Runtime variables. message/query is used as the user message; the rest are context variables.", - ) - - -class OpenApiResponse(BaseModel): - """Unified response format.""" - - success: bool = True - data: Optional[Dict[str, Any]] = None - errCode: Optional[str] = None - errMsg: Optional[str] = None - - -# ─── Helper ───────────────────────────────────── - - -def _bind_log(request: Request, **kwargs): - trace_id = getattr(request.state, "trace_id", "-") - return logger.bind(trace_id=trace_id, **kwargs) - - -def _require_graph_execute(auth: AuthContext, graph_id: uuid.UUID) -> None: - """Require graphs:execute scope if using token auth.""" - if not auth.is_token_auth: - return - has_perm = check_token_permission( - token_scopes=auth.token_scopes or [], - required_scope="graphs:execute", - resource_type="graph", - resource_id=str(graph_id), - token_resource_type=auth.token_resource_type, - token_resource_id=auth.token_resource_id, - ) - if not has_perm: - raise ForbiddenException("Token missing required scope: graphs:execute") - - -# ─── Endpoints ───────────────────────────────────── - - -@router.post("/{graph_id}/run") -async def run_graph( - request: Request, - graph_id: uuid.UUID, - payload: RunGraphRequest = RunGraphRequest(), - auth: AuthContext = Depends(get_current_user_or_token), - db: AsyncSession = Depends(get_db), -): - """ - Start a Graph execution. - - Authenticate via PlatformToken and start an async Graph execution. - Returns an executionId for subsequent status queries and result retrieval. - """ - _require_graph_execute(auth, graph_id) - user = auth.user - log = _bind_log(request, user_id=str(user.id), graph_id=str(graph_id)) - log.info("openapi.graph.run start") - - service = OpenApiGraphService(db) - result = await service.run_graph( - graph_id=graph_id, - user_id=user.id, - variables=payload.variables, - ) - - log.info(f"openapi.graph.run success execution_id={result['executionId']}") - return {"success": True, "data": result} - - -@router.get("/{execution_id}/status") -async def get_execution_status( - request: Request, - execution_id: uuid.UUID, - auth: AuthContext = Depends(get_current_user_or_token), - db: AsyncSession = Depends(get_db), -): - """ - Query execution status. - - Return the current status (init / executing / finish / failed). - """ - user = auth.user - log = _bind_log(request, user_id=str(user.id), execution_id=str(execution_id)) - log.info("openapi.graph.status start") - - service = OpenApiGraphService(db) - result = await service.get_status(execution_id, user.id) - - _require_graph_execute(auth, uuid.UUID(result["graphId"])) - - log.info(f"openapi.graph.status success status={result['status']}") - return {"success": True, "data": result} - - -@router.post("/{execution_id}/abort") -async def abort_execution( - request: Request, - execution_id: uuid.UUID, - auth: AuthContext = Depends(get_current_user_or_token), - db: AsyncSession = Depends(get_db), -): - """ - Abort execution. - - Abort a running Graph execution. 
- """ - user = auth.user - log = _bind_log(request, user_id=str(user.id), execution_id=str(execution_id)) - log.info("openapi.graph.abort start") - - service = OpenApiGraphService(db) - result = await service.abort_execution(execution_id, user.id) - - _require_graph_execute(auth, uuid.UUID(result["graphId"])) - - log.info(f"openapi.graph.abort success status={result['status']}") - return {"success": True, "data": result} - - -@router.get("/{execution_id}/result") -async def get_execution_result( - request: Request, - execution_id: uuid.UUID, - auth: AuthContext = Depends(get_current_user_or_token), - db: AsyncSession = Depends(get_db), -): - """ - Get execution result. - - Retrieve the output of a Graph execution. - If execution is not yet complete, output is null. - """ - user = auth.user - log = _bind_log(request, user_id=str(user.id), execution_id=str(execution_id)) - log.info("openapi.graph.result start") - - service = OpenApiGraphService(db) - result = await service.get_result(execution_id, user.id) - - _require_graph_execute(auth, uuid.UUID(result["graphId"])) - - log.info(f"openapi.graph.result success status={result['status']}") - return {"success": True, "data": result} diff --git a/backend/app/api/v1/openclaw_chat.py b/backend/app/api/v1/openclaw_chat.py index b6dc4242f..ff072b51a 100644 --- a/backend/app/api/v1/openclaw_chat.py +++ b/backend/app/api/v1/openclaw_chat.py @@ -17,6 +17,7 @@ from sqlalchemy.ext.asyncio import AsyncSession from app.common.dependencies import get_current_user +from app.common.stream_errors import stream_error_event from app.core.database import get_db from app.models.auth import AuthUser as User from app.services.openclaw_instance_service import OpenClawInstanceService @@ -89,7 +90,14 @@ async def _stream_sse(url: str, headers: dict, body: dict): else: yield "\n" except Exception as e: - yield f"data: {{'error': '{str(e)}'}}\n\n" + yield stream_error_event( + code="OPENCLAW_STREAM_ERROR", + message="OpenClaw stream failed.", + data={"detail": str(e)}, + source="runtime", + retryable=True, + user_action="retry", + ) @router.post("/tools/invoke") diff --git a/backend/app/api/v1/openclaw_devices.py b/backend/app/api/v1/openclaw_devices.py index e5910c2e1..b6c4d5fce 100644 --- a/backend/app/api/v1/openclaw_devices.py +++ b/backend/app/api/v1/openclaw_devices.py @@ -16,6 +16,7 @@ from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError, NotFoundError, ServiceUnavailableError from app.common.dependencies import get_current_user from app.core.agent.backends.docker_check import get_docker_client from app.core.database import get_db @@ -42,10 +43,23 @@ async def _docker_exec(container_id: str, cmd: list[str]) -> str: exit_code, output = await asyncio.to_thread(container.exec_run, cmd=cmd) output_str = output.decode("utf-8") if isinstance(output, bytes) else str(output) if exit_code != 0: - raise RuntimeError(f"Command failed with exit code {exit_code}: {output_str.strip()}") + raise ServiceUnavailableError( + "OpenClaw device command failed", + code="OPENCLAW_DEVICE_COMMAND_FAILED", + data={ + "container_id": container_id, + "command": cmd, + "exit_code": exit_code, + "detail": output_str.strip(), + }, + ) return output_str.strip() except docker.errors.NotFound: - raise RuntimeError(f"Container {container_id} not found") + raise NotFoundError( + "OpenClaw container not found", + code="OPENCLAW_CONTAINER_NOT_FOUND", + data={"container_id": container_id}, + ) @router.get("") @@ -56,7 +70,7 @@ 
async def list_devices( """List devices paired with the user's OpenClaw instance.""" instance = await _get_running_instance(db, str(current_user.id)) if not instance: - return {"success": True, "data": []} + raise InvalidRequestError("No running instance", code="OPENCLAW_INSTANCE_NOT_RUNNING") try: output = await _docker_exec(instance.container_id, ["openclaw", "devices", "list", "--json"]) @@ -64,7 +78,7 @@ async def list_devices( return {"success": True, "data": devices} except Exception as e: logger.warning(f"Failed to list devices for user {current_user.id}: {e}") - return {"success": True, "data": [], "warning": str(e)} + raise ServiceUnavailableError(str(e), code="OPENCLAW_DEVICE_LIST_FAILED") @router.post("/{device_id}/approve") @@ -76,13 +90,13 @@ async def approve_device( """Approve a specific device pairing request.""" instance = await _get_running_instance(db, str(current_user.id)) if not instance: - return {"success": False, "error": "No running instance"} + raise InvalidRequestError("No running instance", code="OPENCLAW_INSTANCE_NOT_RUNNING") try: await _docker_exec(instance.container_id, ["openclaw", "devices", "approve", device_id]) return {"success": True} except Exception as e: - return {"success": False, "error": str(e)} + raise ServiceUnavailableError(str(e), code="OPENCLAW_DEVICE_APPROVE_FAILED") @router.post("/approve-all") @@ -93,7 +107,7 @@ async def approve_all_devices( """Approve all pending device pairing requests.""" instance = await _get_running_instance(db, str(current_user.id)) if not instance: - return {"success": False, "error": "No running instance"} + raise InvalidRequestError("No running instance", code="OPENCLAW_INSTANCE_NOT_RUNNING") try: output = await _docker_exec(instance.container_id, ["openclaw", "devices", "list", "--json"]) @@ -105,4 +119,4 @@ async def approve_all_devices( await _docker_exec(instance.container_id, ["openclaw", "devices", "approve", device_id]) return {"success": True} except Exception as e: - return {"success": False, "error": str(e)} + raise ServiceUnavailableError(str(e), code="OPENCLAW_DEVICE_APPROVE_ALL_FAILED") diff --git a/backend/app/api/v1/openclaw_instances.py b/backend/app/api/v1/openclaw_instances.py index ae18a0698..1b1ab9fc1 100644 --- a/backend/app/api/v1/openclaw_instances.py +++ b/backend/app/api/v1/openclaw_instances.py @@ -12,6 +12,7 @@ from pydantic import BaseModel, Field from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError, NotFoundError, ServiceUnavailableError from app.common.dependencies import get_current_user from app.core.database import get_db from app.models.auth import AuthUser as User @@ -71,7 +72,7 @@ async def start_instance( instance = await service.ensure_instance_running(str(current_user.id)) return {"success": True, "data": _serialize_instance(instance)} except RuntimeError as exc: - return {"success": False, "error": str(exc)} + raise ServiceUnavailableError(str(exc), code="OPENCLAW_INSTANCE_START_FAILED") @router.post("/stop") @@ -83,7 +84,7 @@ async def stop_instance( service = OpenClawInstanceService(db) instance = await service.stop_instance(str(current_user.id)) if not instance: - return {"success": False, "error": "No instance found"} + raise NotFoundError("No instance found", code="OPENCLAW_INSTANCE_NOT_FOUND") return {"success": True, "data": _serialize_instance(instance)} @@ -98,7 +99,7 @@ async def restart_instance( instance = await service.restart_instance(str(current_user.id)) return {"success": True, "data": 
_serialize_instance(instance)} except RuntimeError as exc: - return {"success": False, "error": str(exc)} + raise ServiceUnavailableError(str(exc), code="OPENCLAW_INSTANCE_RESTART_FAILED") @router.delete("") @@ -110,7 +111,7 @@ async def delete_instance( service = OpenClawInstanceService(db) deleted = await service.delete_instance(str(current_user.id)) if not deleted: - return {"success": False, "error": "No instance found"} + raise NotFoundError("No instance found", code="OPENCLAW_INSTANCE_NOT_FOUND") return {"success": True} @@ -124,11 +125,11 @@ async def sync_skills( instance = await service.get_instance_by_user(str(current_user.id)) if not instance or not instance.container_id or instance.status != InstanceStatus.RUNNING: - return {"success": False, "error": "Instance is not running"} + raise InvalidRequestError("Instance is not running", code="OPENCLAW_INSTANCE_NOT_RUNNING") synced_count = await service.sync_skills_to_container(str(current_user.id), instance.container_id) if synced_count < 0: - return {"success": False, "error": "Failed to sync skills"} + raise ServiceUnavailableError("Failed to sync skills", code="OPENCLAW_SKILL_SYNC_FAILED") return {"success": True, "data": {"syncedCount": synced_count}} diff --git a/backend/app/api/v1/openclaw_proxy.py b/backend/app/api/v1/openclaw_proxy.py index b2b36ca66..a3f1dcece 100644 --- a/backend/app/api/v1/openclaw_proxy.py +++ b/backend/app/api/v1/openclaw_proxy.py @@ -26,6 +26,7 @@ from app.models.auth import AuthUser as User from app.models.enums import InstanceStatus from app.services.openclaw_instance_service import OpenClawInstanceService +from app.utils.safe_task import safe_create_task router = APIRouter(prefix="/v1/openclaw/proxy", tags=["OpenClaw Proxy"]) @@ -134,7 +135,10 @@ async def proxy_to_openclaw( # Auto device pair: wait for the client to connect WebSocket and then approve if is_entry and instance.container_id: - asyncio.create_task(_poll_approve_devices(instance.container_id)) + safe_create_task( + _poll_approve_devices(instance.container_id), + name=f"device-approve-{instance.container_id[:12]}", + ) query_params = {k: v for k, v in parse_qs(request.url.query).items()} if is_entry: diff --git a/backend/app/api/v1/runs.py b/backend/app/api/v1/runs.py deleted file mode 100644 index 388f23cc2..000000000 --- a/backend/app/api/v1/runs.py +++ /dev/null @@ -1,290 +0,0 @@ -"""Runs API.""" - -from __future__ import annotations - -import uuid - -from fastapi import APIRouter, Depends, Query -from loguru import logger -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import CurrentUser -from app.common.exceptions import BadRequestException -from app.core.database import get_db -from app.models.agent_run import AgentRun, AgentRunStatus -from app.schemas import BaseResponse -from app.schemas.runs import ( - AgentDefinitionResponse, - AgentListResponse, - CreateRunRequest, - CreateRunResponse, - CreateSkillCreatorRunRequest, - RunEventResponse, - RunEventsPageResponse, - RunListResponse, - RunSnapshotResponse, - RunSummary, -) -from app.services.run_reducers import agent_registry -from app.services.run_service import RunService -from app.utils.task_manager import task_manager - -router = APIRouter(prefix="/v1/runs", tags=["Runs"]) - - -def _to_run_summary(run: AgentRun) -> RunSummary: - definition = agent_registry.find(run.agent_name) - return RunSummary( - run_id=run.id, - status=run.status.value if hasattr(run.status, "value") else str(run.status), - run_type=run.run_type, - 
agent_name=run.agent_name, - agent_display_name=definition.display_name if definition else run.agent_name, - source=run.source, - thread_id=run.thread_id, - graph_id=run.graph_id, - title=run.title, - started_at=run.started_at, - finished_at=run.finished_at, - last_seq=run.last_seq, - error_code=run.error_code, - error_message=run.error_message, - last_heartbeat_at=run.last_heartbeat_at, - updated_at=run.updated_at, - ) - - -@router.get("", response_model=BaseResponse[RunListResponse]) -async def list_runs( - current_user: CurrentUser, - run_type: str | None = Query(None), - agent_name: str | None = Query(None), - status: str | None = Query(None), - search: str | None = Query(None, max_length=200), - limit: int = Query(50, ge=1, le=200), - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunListResponse]: - service = RunService(db) - runs = await service.list_recent_runs( - user_id=str(current_user.id), - run_type=run_type, - agent_name=agent_name, - status=status, - search=search, - limit=limit, - ) - return BaseResponse( - success=True, - code=200, - msg="ok", - data=RunListResponse(items=[_to_run_summary(run) for run in runs]), - ) - - -@router.get("/agents", response_model=BaseResponse[AgentListResponse]) -async def list_agents( - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[AgentListResponse]: - service = RunService(db) - return BaseResponse( - success=True, - code=200, - msg="ok", - data=AgentListResponse( - items=[ - AgentDefinitionResponse(agent_name=definition.agent_name, display_name=definition.display_name) - for definition in await service.list_agents() - ] - ), - ) - - -@router.get("/active", response_model=BaseResponse[RunSummary | None]) -async def get_active_run( - current_user: CurrentUser, - agent_name: str = Query(..., min_length=1), - graph_id: uuid.UUID | None = Query(None), - thread_id: str | None = Query(None), - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunSummary | None]: - service = RunService(db) - run = await service.find_latest_active_run( - user_id=str(current_user.id), - agent_name=agent_name, - graph_id=graph_id, - thread_id=thread_id, - ) - return BaseResponse( - success=True, - code=200, - msg="ok", - data=_to_run_summary(run) if run else None, - ) - - -@router.get("/active/skill-creator", response_model=BaseResponse[RunSummary | None]) -async def get_active_skill_creator_run( - current_user: CurrentUser, - graph_id: uuid.UUID, - thread_id: str | None = Query(None), - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunSummary | None]: - return await get_active_run( - current_user=current_user, - agent_name="skill_creator", - graph_id=graph_id, - thread_id=thread_id, - db=db, - ) - - -@router.post("", response_model=BaseResponse[CreateRunResponse]) -async def create_run( - request: CreateRunRequest, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[CreateRunResponse]: - service = RunService(db) - try: - run = await service.create_run( - user_id=str(current_user.id), - agent_name=request.agent_name, - graph_id=request.graph_id, - thread_id=request.thread_id, - message=request.message, - input=request.input, - ) - except ValueError as exc: - raise BadRequestException(str(exc)) - return BaseResponse( - success=True, - code=200, - msg="Run created", - data=CreateRunResponse( - run_id=run.id, - thread_id=run.thread_id or "", - status=run.status.value if hasattr(run.status, "value") else str(run.status), - ), - ) - - -@router.post("/skill-creator", 
response_model=BaseResponse[CreateRunResponse]) -async def create_skill_creator_run( - request: CreateSkillCreatorRunRequest, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[CreateRunResponse]: - return await create_run( - request=CreateRunRequest( - agent_name="skill_creator", - graph_id=request.graph_id, - message=request.message, - thread_id=request.thread_id, - input={"edit_skill_id": request.edit_skill_id}, - ), - current_user=current_user, - db=db, - ) - - -@router.get("/{run_id}", response_model=BaseResponse[RunSummary]) -async def get_run( - run_id: uuid.UUID, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunSummary]: - service = RunService(db) - run = await service.get_run(run_id, str(current_user.id)) - if run is None: - return BaseResponse(success=False, code=404, msg="Run not found", data=None) - return BaseResponse(success=True, code=200, msg="ok", data=_to_run_summary(run)) - - -@router.get("/{run_id}/snapshot", response_model=BaseResponse[RunSnapshotResponse]) -async def get_run_snapshot( - run_id: uuid.UUID, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunSnapshotResponse]: - service = RunService(db) - snapshot = await service.get_snapshot(run_id, str(current_user.id)) - if snapshot is None: - return BaseResponse(success=False, code=404, msg="Snapshot not found", data=None) - return BaseResponse( - success=True, - code=200, - msg="ok", - data=RunSnapshotResponse( - run_id=run_id, - status=snapshot.status, - last_seq=snapshot.last_seq, - projection=snapshot.projection or {}, - ), - ) - - -@router.get("/{run_id}/events", response_model=BaseResponse[RunEventsPageResponse]) -async def get_run_events( - current_user: CurrentUser, - run_id: uuid.UUID, - after_seq: int = Query(0, ge=0), - limit: int = Query(500, ge=1, le=1000), - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunEventsPageResponse]: - service = RunService(db) - events = await service.list_events_after(run_id, str(current_user.id), after_seq=after_seq, limit=limit) - return BaseResponse( - success=True, - code=200, - msg="ok", - data=RunEventsPageResponse( - run_id=run_id, - events=[ - RunEventResponse( - seq=event.seq, - event_type=event.event_type, - payload=event.payload or {}, - trace_id=event.trace_id, - observation_id=event.observation_id, - parent_observation_id=event.parent_observation_id, - created_at=event.created_at, - ) - for event in events - ], - next_after_seq=events[-1].seq if events else after_seq, - ), - ) - - -@router.post("/{run_id}/cancel", response_model=BaseResponse[RunSummary]) -async def cancel_run( - run_id: uuid.UUID, - current_user: CurrentUser, - db: AsyncSession = Depends(get_db), -) -> BaseResponse[RunSummary]: - service = RunService(db) - run = await service.get_run(run_id, str(current_user.id)) - if run is None: - return BaseResponse(success=False, code=404, msg="Run not found", data=None) - - if run.thread_id and run.status in { - AgentRunStatus.QUEUED, - AgentRunStatus.RUNNING, - AgentRunStatus.INTERRUPT_WAIT, - }: - try: - await task_manager.stop_task(run.thread_id) - except Exception: - logger.debug("Failed to stop task for thread_id=%s during run cancellation", run.thread_id, exc_info=True) - - run = await service.mark_status( - run_id=run_id, - user_id=str(current_user.id), - status=AgentRunStatus.CANCELLED, - error_code="cancelled", - error_message="Cancelled by user", - ) - if run is None: - return BaseResponse(success=False, code=404, msg="Run not 
found", data=None) - return BaseResponse(success=True, code=200, msg="Run cancelled", data=_to_run_summary(run)) diff --git a/backend/app/api/v1/sandboxes.py b/backend/app/api/v1/sandboxes.py index 7cce489d2..f57308024 100644 --- a/backend/app/api/v1/sandboxes.py +++ b/backend/app/api/v1/sandboxes.py @@ -10,8 +10,8 @@ from sqlalchemy import func, select from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError from app.common.dependencies import get_current_user -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException from app.common.response import success_response from app.core.database import get_db from app.models.auth import AuthUser as User @@ -31,9 +31,9 @@ async def _verify_sandbox_ownership(sandbox_id: str, current_user: User, db: Asy result = await db.execute(select(UserSandbox).where(UserSandbox.id == sandbox_id)) sb = result.scalar_one_or_none() if not sb: - raise NotFoundException("Sandbox not found") + raise NotFoundError("Sandbox not found", code="SANDBOX_NOT_FOUND", data={"sandbox_id": sandbox_id}) if sb.user_id != str(current_user.id): - raise ForbiddenException("Access denied") + raise AccessDeniedError("Access denied", code="SANDBOX_ACCESS_DENIED", data={"sandbox_id": sandbox_id}) # Schemas @@ -156,7 +156,7 @@ async def get_sandbox( sb = result.scalar_one_or_none() if not sb: - raise NotFoundException("Sandbox not found") + raise NotFoundError("Sandbox not found", code="SANDBOX_NOT_FOUND", data={"sandbox_id": sandbox_id}) return SandboxResponse( id=sb.id, @@ -190,14 +190,18 @@ async def update_sandbox( if body.image is not None: s = body.image.strip() if not s: - raise BadRequestException("image cannot be empty") + raise InvalidRequestError("image cannot be empty", code="SANDBOX_IMAGE_EMPTY") if len(s) > 255: - raise BadRequestException("image must be at most 255 characters") + raise InvalidRequestError( + "image must be at most 255 characters", + code="SANDBOX_IMAGE_TOO_LONG", + data={"max_length": 255}, + ) image_value = s service = SandboxManagerService(db) success = await service.update_sandbox_config(sandbox_id, image=image_value) if not success: - raise NotFoundException("Sandbox not found") + raise NotFoundError("Sandbox not found", code="SANDBOX_NOT_FOUND", data={"sandbox_id": sandbox_id}) return success_response(message="Sandbox config updated") @@ -212,7 +216,11 @@ async def stop_sandbox( service = SandboxManagerService(db) success = await service.stop_sandbox(sandbox_id) if not success: - raise NotFoundException("Sandbox not found or already stopped") + raise NotFoundError( + "Sandbox not found or already stopped", + code="SANDBOX_STOP_TARGET_NOT_FOUND", + data={"sandbox_id": sandbox_id}, + ) return success_response(message="Sandbox stopped successfully") @@ -228,7 +236,7 @@ async def restart_sandbox( service = SandboxManagerService(db) success = await service.restart_sandbox(sandbox_id) if not success: - raise NotFoundException("Sandbox not found") + raise NotFoundError("Sandbox not found", code="SANDBOX_NOT_FOUND", data={"sandbox_id": sandbox_id}) return success_response(message="Sandbox scheduled for restart") @@ -244,7 +252,7 @@ async def rebuild_sandbox( service = SandboxManagerService(db) success = await service.rebuild_sandbox(sandbox_id) if not success: - raise NotFoundException("Sandbox not found") + raise NotFoundError("Sandbox not found", code="SANDBOX_NOT_FOUND", data={"sandbox_id": sandbox_id}) return 
success_response(message="Sandbox rebuilt successfully") @@ -259,6 +267,6 @@ async def delete_sandbox( service = SandboxManagerService(db) success = await service.delete_sandbox(sandbox_id) if not success: - raise NotFoundException("Sandbox not found") + raise NotFoundError("Sandbox not found", code="SANDBOX_NOT_FOUND", data={"sandbox_id": sandbox_id}) return success_response(message="Sandbox deleted successfully") diff --git a/backend/app/api/v1/sessions.py b/backend/app/api/v1/sessions.py deleted file mode 100644 index 7e056d574..000000000 --- a/backend/app/api/v1/sessions.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Module: Sessions API (based on SessionService) - -Overview: -- Provides session CRUD (create, list, get, update title, delete) -- Provides listing messages for a specific session -- Managed via SessionService - -Routes: -- POST /sessions/new_session: Create a session -- GET /sessions: List sessions for the current user -- GET /sessions/{session_id}: Get a specific session -- PATCH /sessions/{session_id}: Update session title -- DELETE /sessions/{session_id}: Delete a session -- GET /sessions/{session_id}/messages: Get messages for a session - -Dependencies: -- Session service: SessionService (Depends(get_session_service)) -- Database session: Session (Depends(get_db)) - -Requests/Responses: -- Request model: SessionCreate -- Response models: SessionResponse, MessageResponse -- Unified errors: HTTPException - -Error codes: -- 404: Session not found -- 400: Invalid parameters or business rule failure -""" - -from typing import List - -from fastapi import APIRouter, Depends -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import CurrentUser -from app.common.exceptions import AppException, InternalServerException, NotFoundException -from app.core.database import get_db -from app.schemas.common import SessionCreate, SessionMessageResponse, SessionResponse -from app.services.session_service import SessionService - -router = APIRouter() - -# --- Session endpoints (based on SessionService) --- -# ----- Create ----- -# ----- Read ----- -# ----- Update ----- -# ----- Delete ----- -# ----- Messages ----- - - -def get_session_service(db: AsyncSession = Depends(get_db)) -> SessionService: - return SessionService(db) - - -@router.post("/new_session", response_model=SessionResponse) -async def create_session( - session_data: SessionCreate, - current_user: CurrentUser, - session_service: SessionService = Depends(get_session_service), -): - """Create a new session.""" - try: - return await session_service.create_session(session_data, user_id=current_user.id) - except AppException: - raise - except Exception as e: - raise InternalServerException("Failed to create session") from e - - -@router.get("/", response_model=List[SessionResponse]) -async def get_sessions( - current_user: CurrentUser, - session_service: SessionService = Depends(get_session_service), -): - """Get all sessions for the current user.""" - try: - return await session_service.get_user_sessions(user_id=current_user.id) - except AppException: - raise - except Exception as e: - raise InternalServerException("Failed to get sessions") from e - - -@router.get("/{session_id}", response_model=SessionResponse) -async def get_session( - session_id: str, - current_user: CurrentUser, - session_service: SessionService = Depends(get_session_service), -): - """Get a specific session.""" - session = await session_service.get_session_for_user(session_id, user_id=current_user.id) - if not session: - raise 
NotFoundException("Session not found") - return session - - -@router.patch("/{session_id}", response_model=SessionResponse) -async def update_session_title( - session_id: str, - title: str, - current_user: CurrentUser, - session_service: SessionService = Depends(get_session_service), -): - """Update session title.""" - try: - updated_session = await session_service.update_session_title(session_id, title, user_id=current_user.id) - if not updated_session: - raise NotFoundException("Session not found") - return updated_session - except AppException: - raise - except Exception as e: - raise InternalServerException("Failed to update session title") from e - - -@router.delete("/{session_id}") -async def delete_session( - session_id: str, - current_user: CurrentUser, - session_service: SessionService = Depends(get_session_service), -): - """Delete a session.""" - try: - success = await session_service.delete_session(session_id, user_id=current_user.id) - if not success: - raise NotFoundException("Session not found") - return {"success": True, "message": "Session deleted successfully"} - except AppException: - raise - except Exception as e: - raise InternalServerException("Failed to delete session") from e - - -@router.get("/{session_id}/messages", response_model=List[SessionMessageResponse]) -async def get_session_messages( - session_id: str, - current_user: CurrentUser, - limit: int = 100, - session_service: SessionService = Depends(get_session_service), -): - """Get messages for a session.""" - try: - messages = await session_service.get_session_messages(session_id, limit, user_id=current_user.id) - return [ - SessionMessageResponse( - id=msg.id, - session_id=msg.thread_id, - content=msg.content, - role=msg.role, - metadata=msg.meta_data, - created_at=msg.created_at, - ) - for msg in messages - ] - except AppException: - raise - except Exception as e: - raise InternalServerException("Failed to get session messages") from e diff --git a/backend/app/api/v1/skill_collaborators.py b/backend/app/api/v1/skill_collaborators.py index c0946a176..1b84368a2 100644 --- a/backend/app/api/v1/skill_collaborators.py +++ b/backend/app/api/v1/skill_collaborators.py @@ -7,8 +7,8 @@ from fastapi import APIRouter, Depends from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import NotFoundError from app.common.dependencies import get_current_user -from app.common.exceptions import NotFoundException from app.core.database import get_db from app.models.auth import AuthUser as User from app.schemas.skill_collaborator import ( @@ -75,11 +75,15 @@ async def add_collaborator( user_service = UserService(db) user = await user_service.get_user_by_email(payload.email.strip()) if not user: - raise NotFoundException("User not found") + raise NotFoundError( + "User not found", + code="USER_NOT_FOUND", + data={"email": payload.email.strip()}, + ) target_user_id = user.id if not target_user_id: - raise NotFoundException("User not found") + raise NotFoundError("User not found", code="USER_NOT_FOUND") service = SkillCollaboratorService(db) collaborator = await service.add_collaborator( diff --git a/backend/app/api/v1/skills.py b/backend/app/api/v1/skills.py index 4d23dd4b5..f12ded802 100644 --- a/backend/app/api/v1/skills.py +++ b/backend/app/api/v1/skills.py @@ -25,15 +25,23 @@ ) from app.services.openclaw_instance_service import OpenClawInstanceService from app.services.skill_service import SkillService +from app.utils.safe_task import safe_create_task -async def _trigger_openclaw_skill_sync(user_id: str, 
db: AsyncSession): - """Trigger a sync of skills to the user's OpenClaw container if it is running.""" +async def _trigger_openclaw_skill_sync(user_id: str): + """Trigger a sync of skills to the user's OpenClaw container if it is running. + + Uses its own DB session since this runs as a background task after the + request-scoped session is closed. + """ + from app.core.database import AsyncSessionLocal + try: - instance_service = OpenClawInstanceService(db) - instance = await instance_service.get_instance_by_user(user_id) - if instance and instance.container_id and instance.status == InstanceStatus.RUNNING: - await instance_service.sync_skills_to_container(user_id, instance.container_id) + async with AsyncSessionLocal() as db: + instance_service = OpenClawInstanceService(db) + instance = await instance_service.get_instance_by_user(user_id) + if instance and instance.container_id and instance.status == InstanceStatus.RUNNING: + await instance_service.sync_skills_to_container(user_id, instance.container_id) except Exception as e: logger.error(f"Failed to trigger openclaw skill sync for user {user_id}: {e}", exc_info=True) @@ -94,9 +102,7 @@ async def create_skill( skill = await service.get_skill(skill.id, current_user.id) # Trigger sync to OpenClaw container - import asyncio - - asyncio.create_task(_trigger_openclaw_skill_sync(current_user.id, db)) + safe_create_task(_trigger_openclaw_skill_sync(current_user.id)) return { "success": True, @@ -154,9 +160,7 @@ async def update_skill( skill = await service.get_skill(skill.id, current_user.id) # Trigger sync to OpenClaw container - import asyncio - - asyncio.create_task(_trigger_openclaw_skill_sync(current_user.id, db)) + safe_create_task(_trigger_openclaw_skill_sync(current_user.id)) return { "success": True, @@ -174,12 +178,12 @@ async def delete_skill( service = SkillService(db) # get the skill name before deletion - from app.common.exceptions import NotFoundException + from app.common.app_errors import NotFoundError try: skill = await service.get_skill(skill_id, current_user.id) skill_name = skill.name - except NotFoundException: + except NotFoundError: skill_name = None await service.delete_skill(skill_id, current_user.id) @@ -187,25 +191,22 @@ async def delete_skill( if skill_name: # Trigger incremental sync to OpenClaw container async def _delete_from_container(): + from app.core.database import AsyncSessionLocal + try: - from app.services.openclaw_instance_service import OpenClawInstanceService - - instance_service = OpenClawInstanceService(db) - instance = await instance_service.get_instance_by_user(current_user.id) - if instance and instance.container_id and instance.status == InstanceStatus.RUNNING: - await instance_service.delete_skill_from_container( - current_user.id, instance.container_id, skill_name - ) + async with AsyncSessionLocal() as bg_db: + instance_service = OpenClawInstanceService(bg_db) + instance = await instance_service.get_instance_by_user(current_user.id) + if instance and instance.container_id and instance.status == InstanceStatus.RUNNING: + await instance_service.delete_skill_from_container( + current_user.id, instance.container_id, skill_name + ) except Exception as e: - from loguru import logger - logger.error( f"Failed to delete skill {skill_name} from container for user {current_user.id}: {e}", exc_info=True ) - import asyncio - - asyncio.create_task(_delete_from_container()) + safe_create_task(_delete_from_container()) return {"success": True} @@ -232,9 +233,7 @@ async def add_file( ) # Trigger sync to 
OpenClaw container - import asyncio - - asyncio.create_task(_trigger_openclaw_skill_sync(current_user.id, db)) + safe_create_task(_trigger_openclaw_skill_sync(current_user.id)) return { "success": True, @@ -253,9 +252,7 @@ async def delete_file( await service.delete_file(file_id, current_user.id) # Trigger sync to OpenClaw container - import asyncio - - asyncio.create_task(_trigger_openclaw_skill_sync(current_user.id, db)) + safe_create_task(_trigger_openclaw_skill_sync(current_user.id)) return {"success": True} @@ -278,9 +275,7 @@ async def update_file( ) # Trigger sync to OpenClaw container - import asyncio - - asyncio.create_task(_trigger_openclaw_skill_sync(current_user.id, db)) + safe_create_task(_trigger_openclaw_skill_sync(current_user.id)) return { "success": True, diff --git a/backend/app/api/v1/task_activities.py b/backend/app/api/v1/task_activities.py new file mode 100644 index 000000000..dacd021ec --- /dev/null +++ b/backend/app/api/v1/task_activities.py @@ -0,0 +1,167 @@ +"""Task Activities API.""" + +from __future__ import annotations + +import uuid + +from fastapi import APIRouter, Depends, Query +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import NotFoundError +from app.common.dependencies import require_workspace_role +from app.core.database import get_db +from app.models.auth import AuthUser as User +from app.models.task_activity import ActivityAuthorType, ActivityType +from app.models.workspace import WorkspaceMemberRole +from app.schemas import BaseResponse +from app.schemas.task_activity import ( + CreateTaskActivityRequest, + TaskActivityListResponse, + TaskActivityResponse, + UpdateTaskActivityRequest, +) +from app.services.task_activity_service import TaskActivityService +from app.websocket.notification_manager import NotificationType, notification_manager + +router = APIRouter(prefix="/v1/tasks/{task_id}/activities", tags=["Task Activities"]) + + +@router.get("", response_model=BaseResponse[TaskActivityListResponse]) +async def list_activities( + task_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + cursor: str | None = Query(None), + limit: int = Query(50, ge=1, le=200), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskActivityListResponse]: + service = TaskActivityService(db) + activities, has_more, next_cursor = await service.list_activities( + task_id=task_id, + workspace_id=workspace_id, + cursor=cursor, + limit=limit, + ) + + return BaseResponse( + success=True, + code=200, + msg="ok", + data=TaskActivityListResponse( + items=[TaskActivityResponse.model_validate(a) for a in activities], + has_more=has_more, + next_cursor=next_cursor, + ), + ) + + +@router.post("", response_model=BaseResponse[TaskActivityResponse]) +async def create_activity( + task_id: uuid.UUID, + request: CreateTaskActivityRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskActivityResponse]: + service = TaskActivityService(db) + activity, task, should_dispatch, mentioned_agent_ids = await service.create_activity( + task_id=task_id, + workspace_id=workspace_id, + author_type=ActivityAuthorType.MEMBER, + author_id=str(current_user.id), + content=request.content, + activity_type=ActivityType.COMMENT, + parent_activity_id=request.parent_activity_id, + ) + + # Trigger executions via orchestrator + if should_dispatch or 
mentioned_agent_ids: + from app.services.dispatch_service import DispatchService + + dispatch = DispatchService(db) + if should_dispatch: + await dispatch.dispatch_task( + task_id=task.id, + user_id=str(current_user.id), + prompt_override=activity.content, + ) + if mentioned_agent_ids: + # For mentioned agents, dispatch the same task + await dispatch.dispatch_task( + task_id=task.id, + user_id=str(current_user.id), + ) + + # Push notification to task creator (if not the commenter) + if task.creator_id != str(current_user.id): + await notification_manager.send_to_user( + task.creator_id, + { + "type": NotificationType.TASK_ACTIVITY_ADDED.value, + "task_id": str(task_id), + "activity_id": str(activity.id), + "author_type": activity.author_type.value, + "author_id": activity.author_id, + }, + ) + + return BaseResponse( + success=True, + code=200, + msg="Activity created", + data=TaskActivityResponse.model_validate(activity), + ) + + +@router.patch("/{activity_id}", response_model=BaseResponse[TaskActivityResponse]) +async def update_activity( + task_id: uuid.UUID, + activity_id: uuid.UUID, + request: UpdateTaskActivityRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskActivityResponse]: + service = TaskActivityService(db) + activity = await service.update_activity( + activity_id=activity_id, + task_id=task_id, + workspace_id=workspace_id, + author_id=str(current_user.id), + content=request.content, + ) + + if not activity: + raise NotFoundError( + "Activity not found", code="TASK_ACTIVITY_NOT_FOUND", data={"activity_id": str(activity_id)} + ) + + return BaseResponse( + success=True, + code=200, + msg="Activity updated", + data=TaskActivityResponse.model_validate(activity), + ) + + +@router.delete("/{activity_id}", response_model=BaseResponse[None]) +async def delete_activity( + task_id: uuid.UUID, + activity_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[None]: + service = TaskActivityService(db) + deleted = await service.delete_activity( + activity_id=activity_id, + task_id=task_id, + workspace_id=workspace_id, + author_id=str(current_user.id), + ) + + if not deleted: + raise NotFoundError( + "Activity not found", code="TASK_ACTIVITY_NOT_FOUND", data={"activity_id": str(activity_id)} + ) + return BaseResponse(success=True, code=200, msg="Activity deleted", data=None) diff --git a/backend/app/api/v1/tasks.py b/backend/app/api/v1/tasks.py new file mode 100644 index 000000000..1fb51a336 --- /dev/null +++ b/backend/app/api/v1/tasks.py @@ -0,0 +1,245 @@ +"""Tasks API.""" + +from __future__ import annotations + +import uuid + +from fastapi import APIRouter, Depends, Query +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError +from app.common.dependencies import CurrentUser, require_workspace_role +from app.core.database import get_db +from app.models.auth import AuthUser as User +from app.models.task import Task, TaskPriority +from app.models.workspace import WorkspaceMemberRole +from app.schemas import BaseResponse +from app.schemas.task import ( + AssignTaskRequest, + CreateTaskRequest, + DispatchTaskRequest, + TaskListResponse, + TaskSummary, + UpdateTaskRequest, +) +from app.services.task_service import TaskService +from 
app.services.workspace_permission import check_workspace_access + +router = APIRouter(prefix="/v1/tasks", tags=["Tasks"]) + + +def _to_summary(t: Task) -> TaskSummary: + return TaskSummary( + id=t.id, + workspace_id=t.workspace_id, + title=t.title, + description=t.description, + goal=t.goal, + status=t.status.value if hasattr(t.status, "value") else str(t.status), + priority=t.priority.value if hasattr(t.priority, "value") else str(t.priority), + agent_id=t.agent_id, + creator_id=t.creator_id, + latest_run_id=t.latest_run_id, + parent_task_id=t.parent_task_id, + tags=t.tags, + position=t.position, + auto_approve=t.auto_approve, + due_date=t.due_date, + created_at=t.created_at, + updated_at=t.updated_at, + ) + + +@router.get("", response_model=BaseResponse[TaskListResponse]) +async def list_tasks( + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + status: str | None = Query(None), + parent_task_id: uuid.UUID | None = Query(None), + agent_id: uuid.UUID | None = Query(None), + limit: int = Query(50, ge=1, le=200), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskListResponse]: + service = TaskService(db) + tasks = await service.list_tasks( + workspace_id=workspace_id, + status=status, + parent_task_id=parent_task_id, + agent_id=agent_id, + limit=limit, + ) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=TaskListResponse(items=[_to_summary(t) for t in tasks]), + ) + + +@router.post("", response_model=BaseResponse[TaskSummary]) +async def create_task( + request: CreateTaskRequest, + current_user: CurrentUser, + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskSummary]: + service = TaskService(db) + + try: + priority = TaskPriority(request.priority) + except ValueError: + raise InvalidRequestError( + f"Invalid priority: {request.priority}", + code="TASK_PRIORITY_INVALID", + data={"priority": request.priority}, + ) + + has_access = await check_workspace_access(db, request.workspace_id, current_user, WorkspaceMemberRole.member) + if not has_access: + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") + + task = await service.create_task( + workspace_id=request.workspace_id, + creator_id=str(current_user.id), + title=request.title, + description=request.description, + goal=request.goal, + priority=priority, + agent_id=request.agent_id, + parent_task_id=request.parent_task_id, + tags=request.tags, + position=request.position, + auto_approve=request.auto_approve, + ) + return BaseResponse(success=True, code=200, msg="Task created", data=_to_summary(task)) + + +@router.get("/meta/transitions", response_model=BaseResponse) +async def get_transitions( + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), +) -> BaseResponse: + return BaseResponse( + success=True, + code=200, + msg="ok", + data=TaskService.get_transitions(), + ) + + +@router.get("/{task_id}", response_model=BaseResponse[TaskSummary]) +async def get_task( + task_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskSummary]: + service = TaskService(db) + task = await service.get_task(task_id, workspace_id) + if not task: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + return BaseResponse(success=True, code=200, msg="ok", data=_to_summary(task)) + + 
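+# Partial-update semantics: model_dump(exclude_unset=True) below forwards only the
+# fields explicitly present in the PATCH body, so omitted fields keep their stored values.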
+@router.patch("/{task_id}", response_model=BaseResponse[TaskSummary]) +async def update_task( + task_id: uuid.UUID, + request: UpdateTaskRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskSummary]: + service = TaskService(db) + updates = request.model_dump(exclude_unset=True) + task = await service.update_task(task_id, workspace_id, **updates) + if not task: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + return BaseResponse(success=True, code=200, msg="Task updated", data=_to_summary(task)) + + +@router.post("/{task_id}/assign", response_model=BaseResponse[TaskSummary]) +async def assign_task( + task_id: uuid.UUID, + request: AssignTaskRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskSummary]: + service = TaskService(db) + task = await service.assign_to_agent( + task_id=task_id, + workspace_id=workspace_id, + agent_id=request.agent_id, + ) + return BaseResponse(success=True, code=200, msg="Task assigned", data=_to_summary(task)) + + +@router.post("/{task_id}/dispatch", response_model=BaseResponse[TaskSummary]) +async def dispatch_task( + task_id: uuid.UUID, + request: DispatchTaskRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskSummary]: + from app.services.dispatch_service import DispatchService + + dispatch = DispatchService(db) + await dispatch.dispatch_task( + task_id=task_id, + user_id=str(current_user.id), + ) + service = TaskService(db) + task = await service.get_task(task_id, workspace_id) + if task is None: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + return BaseResponse(success=True, code=200, msg="Task dispatched", data=_to_summary(task)) + + +@router.post("/{task_id}/cancel", response_model=BaseResponse[TaskSummary]) +async def cancel_task( + task_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[TaskSummary]: + from app.services.dispatch_service import DispatchService + + # Find the latest run for this task and cancel it + service = TaskService(db) + task = await service.get_task(task_id, workspace_id) + if not task: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + + if task.latest_run_id: + # Cancel the run through the orchestrator, which will auto-sync task status + dispatch = DispatchService(db) + try: + await dispatch.cancel_run(task.latest_run_id) + except Exception: + pass # Run may already be in a terminal state + else: + # No active run — safe to transition status directly via service + await service.cancel_task(task) + + await db.refresh(task) + return BaseResponse(success=True, code=200, msg="Task cancelled", data=_to_summary(task)) + + +@router.get("/{task_id}/runs") +async def list_task_runs( + task_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse: + from app.schemas.agent_run import AgentRunResponse + from app.services.agent_run_service import 
AgentRunService + + service = AgentRunService(db) + runs = await service.list_runs(workspace_id=workspace_id, task_id=task_id) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[AgentRunResponse.model_validate(r) for r in runs], + ) diff --git a/backend/app/api/v1/threads.py b/backend/app/api/v1/threads.py new file mode 100644 index 000000000..ca11dc000 --- /dev/null +++ b/backend/app/api/v1/threads.py @@ -0,0 +1,244 @@ +"""Threads API.""" + +from __future__ import annotations + +import uuid +from typing import List + +from fastapi import APIRouter, Depends, Query +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AccessDeniedError, InvalidRequestError +from app.common.dependencies import CurrentUser, require_workspace_role +from app.core.database import get_db +from app.models.agent_run import AgentRun +from app.models.auth import AuthUser as User +from app.models.execution import Artifact, Execution +from app.models.workspace import WorkspaceMemberRole +from app.schemas import BaseResponse +from app.schemas.artifact import ArtifactResponse +from app.schemas.thread import ( + ChatRequest, + ChatResponse, + CreateThreadRequest, + ThreadEventResponse, + ThreadEventsListResponse, + ThreadResponse, + ThreadSummary, + UpdateThreadRequest, +) +from app.services.dispatch_service import DispatchService +from app.services.thread_service import ThreadService +from app.services.workspace_permission import check_workspace_access + +router = APIRouter(prefix="/v1/threads", tags=["Threads"]) + + +# --------------------------------------------------------------------------- +# Thread routes +# --------------------------------------------------------------------------- + + +@router.get("", response_model=BaseResponse[List[ThreadSummary]]) +async def list_threads( + agent_id: uuid.UUID = Query(...), + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[ThreadSummary]]: + service = ThreadService(db) + threads = await service.list_threads(agent_id) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[ThreadSummary.model_validate(t) for t in threads], + ) + + +@router.post("", response_model=BaseResponse[ThreadResponse]) +async def create_thread( + request: CreateThreadRequest, + current_user: CurrentUser, + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ThreadResponse]: + has_access = await check_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.member) + if not has_access: + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") + + service = ThreadService(db) + thread = await service.create_thread(workspace_id, str(current_user.id), request) + return BaseResponse(success=True, code=200, msg="Thread created", data=ThreadResponse.model_validate(thread)) + + +@router.get("/{thread_id}", response_model=BaseResponse[ThreadResponse]) +async def get_thread( + thread_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ThreadResponse]: + service = ThreadService(db) + thread = await service.get_thread(thread_id) + return BaseResponse(success=True, code=200, msg="ok", data=ThreadResponse.model_validate(thread)) + + +@router.patch("/{thread_id}", 
response_model=BaseResponse[ThreadResponse]) +async def update_thread( + thread_id: uuid.UUID, + request: UpdateThreadRequest, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ThreadResponse]: + service = ThreadService(db) + thread = await service.update_thread(thread_id, request) + return BaseResponse(success=True, code=200, msg="Thread updated", data=ThreadResponse.model_validate(thread)) + + +@router.delete("/{thread_id}", response_model=BaseResponse[ThreadResponse]) +async def archive_thread( + thread_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.member), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ThreadResponse]: + service = ThreadService(db) + thread = await service.archive_thread(thread_id) + return BaseResponse(success=True, code=200, msg="Thread archived", data=ThreadResponse.model_validate(thread)) + + +# --------------------------------------------------------------------------- +# Chat: send message + dispatch agent run (single call) +# --------------------------------------------------------------------------- + + +@router.post("/{thread_id}/chat", response_model=BaseResponse[ChatResponse]) +async def chat( + thread_id: uuid.UUID, + request: ChatRequest, + current_user: CurrentUser, + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ChatResponse]: + """Send a user message and dispatch an agent run in one call. + + The user message flows through the event bus as a `user_message` event. + ExecutionEvent is the single source of truth. + """ + has_access = await check_workspace_access( + db, + workspace_id, + current_user, + WorkspaceMemberRole.member, + ) + if not has_access: + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") + + active_run = ( + await db.execute( + select(AgentRun).where( + AgentRun.thread_id == thread_id, + AgentRun.status.in_(["pending", "running"]), + ) + ) + ).scalar_one_or_none() + if active_run: + raise InvalidRequestError( + "Thread has an active run, please wait for it to complete", + code="THREAD_ACTIVE_RUN_EXISTS", + data={"thread_id": str(thread_id)}, + ) + + # 1. Create run + execution first (so we have execution_id for the event) + dispatch = DispatchService(db) + run = await dispatch.dispatch_chat( + thread_id=thread_id, + message=request.message, + user_id=str(current_user.id), + ) + + # 2. Emit user_message as the first event in this execution. 
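+    #    (The run and its execution are created first so the event can carry execution_id; consumers read the message back from the ExecutionEvent stream.)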
+ attachments = [att.model_dump() for att in request.attachments] if request.attachments else None + # dispatch_chat always creates an execution + assert run.current_execution_id is not None + await dispatch.emit_user_message( + run=run, + execution_id=run.current_execution_id, + message=request.message, + attachments=attachments, + ) + + return BaseResponse( + success=True, + code=200, + msg="Chat dispatched", + data=ChatResponse( + run_id=run.id, + execution_id=run.current_execution_id, + ), + ) + + +# --------------------------------------------------------------------------- +# Thread artifacts: aggregate across all runs/executions +# --------------------------------------------------------------------------- + + +@router.get("/{thread_id}/artifacts", response_model=BaseResponse[List[ArtifactResponse]]) +async def list_thread_artifacts( + thread_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[List[ArtifactResponse]]: + """List all artifacts produced by runs in this thread.""" + artifacts = ( + ( + await db.execute( + select(Artifact) + .join(Execution, Artifact.execution_id == Execution.id) + .join(AgentRun, Execution.run_id == AgentRun.id) + .where(AgentRun.thread_id == thread_id) + .order_by(Artifact.created_at.desc()) + ) + ) + .scalars() + .all() + ) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=[ArtifactResponse.model_validate(a) for a in artifacts], + ) + + +# --------------------------------------------------------------------------- +# Thread events: aggregate execution events across all runs +# --------------------------------------------------------------------------- + + +@router.get("/{thread_id}/events", response_model=BaseResponse[ThreadEventsListResponse]) +async def list_thread_events( + thread_id: uuid.UUID, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(...), + after: uuid.UUID | None = Query(None, description="Cursor: event ID to paginate after"), + limit: int = Query(200, ge=1, le=500), + db: AsyncSession = Depends(get_db), +) -> BaseResponse[ThreadEventsListResponse]: + """List aggregated execution events across all runs in this thread.""" + service = ThreadService(db) + events, total = await service.list_thread_events(thread_id, after_id=after, limit=limit) + return BaseResponse( + success=True, + code=200, + msg="ok", + data=ThreadEventsListResponse( + events=[ThreadEventResponse(**e) for e in events], + total=total, + ), + )
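A minimal client sketch of the new chat flow (illustrative only, not part of this diff): one POST both persists the user message and dispatches the run, then the aggregated events feed is polled with the `after` cursor. The base URL and auth setup are assumptions, as is the `id` field read from each event (it assumes `ThreadEventResponse` exposes the event's UUID, which the `after` cursor expects).

```python
import httpx  # assumed available in the client environment

BASE = "http://localhost:8000/api/v1"  # assumption: API mounted under /api


def chat_and_follow(client: httpx.Client, thread_id: str, workspace_id: str) -> None:
    # One call persists the user message (as a user_message event) and dispatches the run.
    resp = client.post(
        f"{BASE}/threads/{thread_id}/chat",
        params={"workspace_id": workspace_id},
        json={"message": "Summarize the latest results"},
    )
    resp.raise_for_status()
    data = resp.json()["data"]  # BaseResponse envelope: success / code / msg / data
    print("run:", data["run_id"], "execution:", data["execution_id"])

    # Page through the thread's aggregated events with the `after` cursor.
    cursor = None
    while True:
        params = {"workspace_id": workspace_id, "limit": 200}
        if cursor is not None:
            params["after"] = cursor
        page = client.get(f"{BASE}/threads/{thread_id}/events", params=params).json()
        events = page["data"]["events"]
        if not events:
            break
        for event in events:
            print(event)
        cursor = events[-1]["id"]  # assumption: each event carries its UUID as `id`
```

Note that if the thread already has a pending or running run, the endpoint answers with the `THREAD_ACTIVE_RUN_EXISTS` error instead of queueing a second run.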
diff --git a/backend/app/api/v1/tokens.py b/backend/app/api/v1/tokens.py index 6a2d2757d..a4509a92d 100644 --- a/backend/app/api/v1/tokens.py +++ b/backend/app/api/v1/tokens.py @@ -11,8 +11,8 @@ from fastapi import APIRouter, Depends, Query from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError from app.common.dependencies import get_current_user -from app.common.exceptions import BadRequestException from app.core.database import get_db from app.models.auth import AuthUser as User from app.schemas.platform_token import ( @@ -68,7 +68,11 @@ async def list_tokens( parsed_resource_id = None if resource_id is not None: if not is_valid_uuid(resource_id): - raise BadRequestException("Invalid resource_id: must be a valid UUID") + raise InvalidRequestError( + "Invalid resource_id: must be a valid UUID", + code="TOKEN_RESOURCE_ID_INVALID", + data={"resource_id": resource_id}, + ) parsed_resource_id = uuid.UUID(resource_id) service = PlatformTokenService(db)
diff --git a/backend/app/api/v1/tools.py b/backend/app/api/v1/tools.py index 2c63f0874..64a56cddc 100644 --- a/backend/app/api/v1/tools.py +++ b/backend/app/api/v1/tools.py @@ -9,6 +9,7 @@ from fastapi import APIRouter, Depends, Query from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import NotFoundError from app.common.dependencies import get_current_user from app.common.response import success_response from app.core.database import get_db @@ -97,7 +98,7 @@ async def get_tool( tool = service.get_tool_by_key(tool_id) if not tool: - return success_response(data=None, message="Tool not found") + raise NotFoundError("Tool not found", code="TOOL_NOT_FOUND", data={"tool_id": tool_id}) return success_response( data=tool.to_response(),
diff --git a/backend/app/api/v1/traces.py b/backend/app/api/v1/traces.py index e12ad1cb9..ddff7dc49 100644 --- a/backend/app/api/v1/traces.py +++ b/backend/app/api/v1/traces.py @@ -1,282 +1,180 @@ """ Traces API (path: /api/v1/traces) -Query historical execution trace data. Supports trace listing, single trace detail, and observation listing. +Query historical execution trace data. Supports trace listing, single trace +detail, and observation listing. """ +from __future__ import annotations + import uuid -from datetime import datetime from typing import Any, Optional -from fastapi import APIRouter, Depends, Query, Request -from loguru import logger from pydantic import BaseModel +from fastapi import APIRouter, Depends, Query +from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession -from app.common.dependencies import CurrentUser -from app.common.exceptions import ForbiddenException +from app.common.app_errors import AccessDeniedError, NotFoundError +from app.common.dependencies import CurrentUser, require_workspace_role from app.core.database import get_db +from app.core.observation.model import Observation, Trace +from app.models.auth import AuthUser as User +from app.models.workspace import WorkspaceMemberRole from app.schemas import BaseResponse -from app.services.trace_service import TraceService +from app.services.workspace_permission import check_workspace_access router = APIRouter(prefix="/v1/traces", tags=["Traces"]) -def _bind_log(request: Request, **kwargs): - trace_id = getattr(request.state, "trace_id", "-") - return logger.bind(trace_id=trace_id, **kwargs) - - # ==================== Response Schemas ==================== -class ObservationSchema(BaseModel): - """Observation response schema.""" - - id: str - trace_id: str - parent_observation_id: Optional[str] = None - type: str - name: Optional[str] = None - level: str = "DEFAULT" - status: str = "RUNNING" - status_message: Optional[str] = None - start_time: Optional[datetime] = None - end_time: Optional[datetime] = None - duration_ms: Optional[int] = None - input: Optional[Any] = None - output: Optional[Any] = None - model_name: Optional[str] = None - model_provider: Optional[str] = None - model_parameters: Optional[Any] = None - prompt_tokens: Optional[int] = None - completion_tokens: Optional[int] = None - total_tokens: Optional[int] = None - input_cost: Optional[float] = None - output_cost: Optional[float] = None - total_cost: Optional[float] = None - metadata: Optional[Any] = None - version: Optional[str] = None +class TraceSchema(BaseModel): + """Single trace representation (field-level, not the envelope).""" class Config: from_attributes = True -class
TraceSchema(BaseModel): - """Trace response schema.""" - - id: str - workspace_id: Optional[str] = None - graph_id: Optional[str] = None - thread_id: Optional[str] = None - user_id: Optional[str] = None - name: Optional[str] = None - status: str - input: Optional[Any] = None - output: Optional[Any] = None - start_time: Optional[datetime] = None - end_time: Optional[datetime] = None - duration_ms: Optional[int] = None - total_tokens: Optional[int] = None - total_cost: Optional[float] = None - metadata: Optional[Any] = None - tags: Optional[list] = None - created_at: Optional[datetime] = None - - class Config: - from_attributes = True - - -class TraceDetailSchema(BaseModel): - """Trace detail (with observations).""" - - trace: TraceSchema - observations: list[ObservationSchema] - - -class TraceListSchema(BaseModel): - """Trace list.""" - - traces: list[TraceSchema] - total: int - - -# ==================== Helper ==================== - - -def _trace_to_schema(trace) -> TraceSchema: - """Convert a Trace ORM object to a schema.""" - return TraceSchema( - id=str(trace.id), - workspace_id=str(trace.workspace_id) if trace.workspace_id else None, - graph_id=str(trace.graph_id) if trace.graph_id else None, - thread_id=trace.thread_id, - user_id=trace.user_id, - name=trace.name, - status=trace.status.value if hasattr(trace.status, "value") else str(trace.status), - input=trace.input, - output=trace.output, - start_time=trace.start_time, - end_time=trace.end_time, - duration_ms=trace.duration_ms, - total_tokens=trace.total_tokens, - total_cost=trace.total_cost, - metadata=trace.metadata_, - tags=trace.tags, - created_at=trace.created_at, - ) - - -def _obs_to_schema(obs) -> ObservationSchema: - """Convert an Observation ORM object to a schema.""" - return ObservationSchema( - id=str(obs.id), - trace_id=str(obs.trace_id), - parent_observation_id=str(obs.parent_observation_id) if obs.parent_observation_id else None, - type=obs.type.value if hasattr(obs.type, "value") else str(obs.type), - name=obs.name, - level=obs.level.value if hasattr(obs.level, "value") else str(obs.level), - status=obs.status.value if hasattr(obs.status, "value") else str(obs.status), - status_message=obs.status_message, - start_time=obs.start_time, - end_time=obs.end_time, - duration_ms=obs.duration_ms, - input=obs.input, - output=obs.output, - model_name=obs.model_name, - model_provider=obs.model_provider, - model_parameters=obs.model_parameters, - prompt_tokens=obs.prompt_tokens, - completion_tokens=obs.completion_tokens, - total_tokens=obs.total_tokens, - input_cost=obs.input_cost, - output_cost=obs.output_cost, - total_cost=obs.total_cost, - metadata=obs.metadata_, - version=obs.version, - ) - - # ==================== Endpoints ==================== @router.get("", response_model=BaseResponse) async def list_traces( - request: Request, - current_user: CurrentUser, + current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), + workspace_id: uuid.UUID = Query(..., description="Filter by Workspace ID (required)"), + agent_version_id: Optional[uuid.UUID] = Query(None, description="Filter by Agent Version ID"), + page: int = Query(1, ge=1, description="Page number (1-based)"), + page_size: int = Query(20, ge=1, le=100, description="Page size"), db: AsyncSession = Depends(get_db), - graph_id: Optional[uuid.UUID] = Query(None, description="Filter by Graph ID"), - workspace_id: Optional[uuid.UUID] = Query(None, description="Filter by Workspace ID"), - thread_id: Optional[str] = Query(None, description="Filter by Thread 
ID"), - limit: int = Query(20, ge=1, le=100, description="Page size"), - offset: int = Query(0, ge=0, description="Offset"), -): - """List traces (paginated).""" - log = _bind_log(request, user_id=str(current_user.id)) - service = TraceService(db) - - if workspace_id: - from app.models.workspace import WorkspaceMemberRole - from app.services.workspace_permission import check_workspace_access - - has_access = await check_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.viewer) - if not has_access: - raise ForbiddenException("No access to workspace traces") - - total = await service.count_traces(graph_id=graph_id, workspace_id=workspace_id, thread_id=thread_id) - traces = await service.list_traces( - graph_id=graph_id, - workspace_id=workspace_id, - thread_id=thread_id, - limit=limit, - offset=offset, - ) +) -> BaseResponse: + """List traces, sorted by created_at DESC.""" + stmt = select(Trace).where(Trace.workspace_id == workspace_id).order_by(Trace.created_at.desc()) + if agent_version_id is not None: + stmt = stmt.where(Trace.agent_version_id == agent_version_id) - log.debug( - f"Listed {len(traces)} traces (total={total}) | workspace_id={workspace_id} graph_id={graph_id} thread_id={thread_id}" - ) + offset = (page - 1) * page_size + stmt = stmt.limit(page_size).offset(offset) + result = await db.execute(stmt) + traces = list(result.scalars().all()) return BaseResponse( success=True, code=200, msg="ok", - data={ - "traces": [_trace_to_schema(t).model_dump(mode="json") for t in traces], - "total": total, - }, + data=[_trace_to_dict(t) for t in traces], ) @router.get("/{trace_id}", response_model=BaseResponse) -async def get_trace_detail( +async def get_trace( trace_id: uuid.UUID, - request: Request, current_user: CurrentUser, db: AsyncSession = Depends(get_db), -): - """Get a single trace's detail with all observations.""" - log = _bind_log(request, user_id=str(current_user.id)) - service = TraceService(db) - - trace = await service.get_trace(trace_id) +) -> BaseResponse: + """Get a single Trace record by ID.""" + trace = (await db.execute(select(Trace).where(Trace.id == trace_id))).scalar_one_or_none() if trace is None: - return BaseResponse(success=False, code=404, msg="Trace not found", data=None) - - if trace.workspace_id: - from app.models.workspace import WorkspaceMemberRole - from app.services.workspace_permission import check_workspace_access - - has_access = await check_workspace_access(db, trace.workspace_id, current_user, WorkspaceMemberRole.viewer) - if not has_access: - raise ForbiddenException("No access to workspace traces") - - observations = await service.get_observations_for_trace(trace_id) + raise NotFoundError("Trace not found", code="TRACE_NOT_FOUND", data={"trace_id": str(trace_id)}) - log.debug(f"Fetched trace {trace_id} with {len(observations)} observations") + if not await check_workspace_access(db, trace.workspace_id, current_user, WorkspaceMemberRole.viewer): + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") return BaseResponse( success=True, code=200, msg="ok", - data={ - "trace": _trace_to_schema(trace).model_dump(mode="json"), - "observations": [_obs_to_schema(o).model_dump(mode="json") for o in observations], - }, + data=_trace_to_dict(trace), ) @router.get("/{trace_id}/observations", response_model=BaseResponse) async def get_trace_observations( trace_id: uuid.UUID, - request: Request, current_user: CurrentUser, + type: Optional[str] = Query(None, description="Filter by observation type 
(e.g. GENERATION)"), db: AsyncSession = Depends(get_db), -): - """Get a flat list of observations for a trace (sorted by time).""" - log = _bind_log(request, user_id=str(current_user.id)) - service = TraceService(db) - - trace = await service.get_trace(trace_id) +) -> BaseResponse: + """Return a flat list of observations for a trace, optionally filtered by type.""" + trace = (await db.execute(select(Trace).where(Trace.id == trace_id))).scalar_one_or_none() if trace is None: - return BaseResponse(success=False, code=404, msg="Trace not found", data=None) - - if trace.workspace_id: - from app.models.workspace import WorkspaceMemberRole - from app.services.workspace_permission import check_workspace_access + raise NotFoundError("Trace not found", code="TRACE_NOT_FOUND", data={"trace_id": str(trace_id)}) - has_access = await check_workspace_access(db, trace.workspace_id, current_user, WorkspaceMemberRole.viewer) - if not has_access: - raise ForbiddenException("No access to workspace traces") + if not await check_workspace_access(db, trace.workspace_id, current_user, WorkspaceMemberRole.viewer): + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") - observations = await service.get_observations_for_trace(trace_id) + stmt = select(Observation).where(Observation.trace_id == trace_id).order_by(Observation.start_time.asc()) + if type is not None: + stmt = stmt.where(Observation.type == type) - log.debug(f"Fetched {len(observations)} observations for trace {trace_id}") + result = await db.execute(stmt) + observations = list(result.scalars().all()) return BaseResponse( success=True, code=200, msg="ok", - data={ - "observations": [_obs_to_schema(o).model_dump(mode="json") for o in observations], - }, + data=[_observation_to_dict(o) for o in observations], ) + + +# ==================== Helpers ==================== + + +def _trace_to_dict(trace: Trace) -> dict[str, Any]: + return { + "id": str(trace.id), + "name": trace.name, + "workspace_id": str(trace.workspace_id), + "execution_id": str(trace.execution_id), + "agent_version_id": str(trace.agent_version_id), + "user_id": str(trace.user_id), + "status": trace.status, + "input": trace.input, + "output": trace.output, + "metadata": trace.meta, + "start_time": trace.start_time.isoformat() if trace.start_time else None, + "end_time": trace.end_time.isoformat() if trace.end_time else None, + "duration_ms": trace.duration_ms, + "total_observations": trace.total_observations, + "total_tokens": trace.total_tokens, + "total_cost": float(trace.total_cost) if trace.total_cost is not None else None, + "tags": trace.tags, + "session_id": trace.session_id, + "environment": trace.environment, + "release": trace.release, + "version": trace.version, + "bookmarked": trace.bookmarked, + "public": trace.public, + "created_at": trace.created_at.isoformat() if trace.created_at else None, + } + + +def _observation_to_dict(obs: Observation) -> dict[str, Any]: + return { + "id": str(obs.id), + "trace_id": str(obs.trace_id), + "parent_observation_id": str(obs.parent_observation_id) if obs.parent_observation_id else None, + "type": obs.type, + "name": obs.name, + "level": obs.level, + "status_message": obs.status_message, + "environment": obs.environment, + "start_time": obs.start_time.isoformat() if obs.start_time else None, + "end_time": obs.end_time.isoformat() if obs.end_time else None, + "completion_start_time": obs.completion_start_time.isoformat() if obs.completion_start_time else None, + "input": obs.input, + "output": 
obs.output, + "metadata": obs.meta, + "model": obs.model, + "model_parameters": obs.model_parameters, + "usage_details": obs.usage_details, + "cost_details": obs.cost_details, + "prompt_name": obs.prompt_name, + "prompt_version": obs.prompt_version, + "tool_definitions": obs.tool_definitions, + "tool_calls": obs.tool_calls, + "execution_id": str(obs.execution_id), + "workspace_id": str(obs.workspace_id), + "created_at": obs.created_at.isoformat() if obs.created_at else None, + }
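The rewritten traces API replaces the old limit/offset parameters with page/page_size (offset = (page - 1) * page_size) and serializes rows through the `_trace_to_dict` / `_observation_to_dict` helpers above. A hedged sketch of the read path follows; the base URL, auth header, and placeholder IDs are assumptions, not part of the diff.

```python
import httpx  # illustrative client, not part of this diff

BASE = "http://localhost:8000/api/v1"

with httpx.Client(headers={"Authorization": "Bearer <token>"}) as client:
    # page/page_size translate to SQL LIMIT/OFFSET: offset = (page - 1) * page_size
    listing = client.get(
        f"{BASE}/traces",
        params={"workspace_id": "<workspace-uuid>", "page": 1, "page_size": 20},
    ).json()

    for trace in listing["data"]:
        print(trace["id"], trace["status"], trace["duration_ms"], trace["total_tokens"])

    # Observations for one trace, narrowed to LLM calls via the optional `type` filter.
    if listing["data"]:
        trace_id = listing["data"][0]["id"]
        obs = client.get(
            f"{BASE}/traces/{trace_id}/observations",
            params={"type": "GENERATION"},
        ).json()
        print(len(obs["data"]), "GENERATION observations")
```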
diff --git a/backend/app/api/v1/users.py b/backend/app/api/v1/users.py index 0a521dcdd..c738d309e 100644 --- a/backend/app/api/v1/users.py +++ b/backend/app/api/v1/users.py @@ -9,14 +9,13 @@ from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AccessDeniedError, NotFoundError from app.common.dependencies import get_current_user -from app.common.exceptions import ForbiddenException, NotFoundException from app.common.response import success_response from app.core.database import get_db from app.models.auth import AuthUser as User from app.models.settings import Settings from app.services.user_service import UserService -from app.services.workspace_file_service import WorkspaceFileService router = APIRouter(prefix="/v1/users", tags=["Users"]) @@ -192,22 +191,6 @@ async def update_settings( } -@router.get("/me/usage-limits") -async def get_usage_limits( - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """Get current user's storage usage (workspace files).""" - service = WorkspaceFileService(db) - storage = await service.get_user_storage_usage(current_user) - usage = {"plan": "standard"} - base = success_response( - data={"storage": storage, "usage": usage}, - message="Fetched storage usage", - ) - return {**base, "storage": storage, "usage": usage} - - @router.get("/{user_id}", response_model=UserResponse) async def get_user( user_id: str, @@ -216,12 +199,12 @@ ): """Get user by ID (requires superuser permission).""" if not current_user.is_superuser: - raise ForbiddenException("Forbidden") + raise AccessDeniedError("Forbidden", code="SUPERUSER_REQUIRED") service = UserService(db) user = await service.get_user_by_id(user_id) if not user: - raise NotFoundException("User not found") + raise NotFoundError("User not found", code="USER_NOT_FOUND", data={"user_id": user_id}) return success_response( data=_user_to_response(user), @@ -238,7 +221,7 @@ async def list_users( ): """Search/list users (requires superuser permission).""" if not current_user.is_superuser: - raise ForbiddenException("Forbidden") + raise AccessDeniedError("Forbidden", code="SUPERUSER_REQUIRED") service = UserService(db) if keyword: @@ -260,7 +243,7 @@ async def create_user( ): """Create a new user (requires superuser permission).""" if not current_user.is_superuser: - raise ForbiddenException("Forbidden") + raise AccessDeniedError("Forbidden", code="SUPERUSER_REQUIRED") service = UserService(db) user = await service.create_user( @@ -286,12 +269,12 @@ async def update_user( ): """Update user info (requires superuser permission).""" if not current_user.is_superuser: - raise ForbiddenException("Forbidden") + raise AccessDeniedError("Forbidden", code="SUPERUSER_REQUIRED") service = UserService(db) user = await service.get_user_by_id(user_id) if not user: - raise NotFoundException("User not found") + raise NotFoundError("User not found", code="USER_NOT_FOUND", data={"user_id": user_id}) updated_user = await service.update_user( user, @@ -317,7 +300,7 @@ async def delete_user( ): """Delete a user (requires superuser permission).""" if not current_user.is_superuser: - raise ForbiddenException("Forbidden") + raise AccessDeniedError("Forbidden", code="SUPERUSER_REQUIRED") service = UserService(db) await service.delete_user(user_id)
diff --git a/backend/app/api/v1/workspace_files.py b/backend/app/api/v1/workspace_files.py deleted file mode 100644 index 96b652616..000000000 --- a/backend/app/api/v1/workspace_files.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -Workspace file management API (versioned path: /api/v1/workspaces) -""" - -import uuid -from typing import Optional - -from fastapi import APIRouter, Depends, File, Query, Request, UploadFile -from fastapi.responses import FileResponse, JSONResponse -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import get_current_user_optional, require_workspace_role -from app.common.exceptions import AppException, ConflictException -from app.common.response import success_response -from app.core.database import get_db -from app.core.settings import settings -from app.models.auth import AuthUser as User -from app.models.workspace import WorkspaceMemberRole -from app.services.workspace_file_service import WorkspaceFileService - -router = APIRouter(prefix="/v1/workspaces", tags=["WorkspaceFiles"]) - - -@router.get("/{workspace_id}/files") -async def list_workspace_files( - workspace_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), -): - service = WorkspaceFileService(db) - files = await service.list_files(workspace_id, current_user) - # compatible with frontend reading files directly, while preserving unified response format - base = success_response(data={"files": files}, message="Fetched workspace files") - return {**base, "files": files} - - -@router.post("/{workspace_id}/files") -async def upload_workspace_file( - workspace_id: uuid.UUID, - file: UploadFile = File(..., description="File to upload"), - db: AsyncSession = Depends(get_db), - current_user: User = require_workspace_role(WorkspaceMemberRole.member), -): - # Duplicate file returns 409 + isDuplicate with error field for frontend - try: - service = WorkspaceFileService(db) - record = await service.upload_file(workspace_id, file, current_user) - base = success_response(data={"file": record}, message="File uploaded") - return {**base, "file": record} - except ConflictException as exc: - return JSONResponse( - status_code=exc.status_code, - content={ - "success": False, - "error": str(exc.detail), - "isDuplicate": True, - }, - ) - except AppException as exc: - # Return error field alongside unified response (success=false) for frontend compatibility - return JSONResponse( - status_code=exc.status_code, - content={ - "success": False, - "error": str(exc.detail), - }, - ) - - -@router.delete("/{workspace_id}/files/{file_id}") -async def delete_workspace_file( - workspace_id: uuid.UUID, - file_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = require_workspace_role(WorkspaceMemberRole.member), -): - service = WorkspaceFileService(db) - await service.delete_file(workspace_id, file_id, current_user) - # Include top-level success field for frontend compatibility - base = success_response(message="File deleted", data={"fileId": str(file_id)}) - return {**base, "success": True} - - -@router.post("/{workspace_id}/files/{file_id}/download") -async def generate_workspace_file_download_url( - workspace_id:
uuid.UUID, - file_id: uuid.UUID, - request: Request, - db: AsyncSession = Depends(get_db), - current_user: User = require_workspace_role(WorkspaceMemberRole.viewer), -): - service = WorkspaceFileService(db) - url = await service.generate_download_url(workspace_id, file_id, current_user) - record = await service.get_file_record(workspace_id, file_id) - - # Generate absolute downloadUrl from the request base URL - base_url = str(request.base_url).rstrip("/") - download_url = f"{base_url}{url}" - viewer_url = f"{settings.frontend_url.rstrip('/')}/workspace/{workspace_id}/files/{file_id}/view" - - payload = { - "downloadUrl": download_url, - "viewerUrl": viewer_url, - "fileName": record.original_name, - "expiresIn": None, - } - - base = success_response(data=payload, message="Download URL generated") - return {**base, "success": True, **payload} - - -@router.get("/{workspace_id}/files/{file_id}/serve") -async def serve_workspace_file( - workspace_id: uuid.UUID, - file_id: uuid.UUID, - token: Optional[str] = Query(default=None, description="Download signature token"), - db: AsyncSession = Depends(get_db), - current_user: Optional[User] = Depends(get_current_user_optional), -): - service = WorkspaceFileService(db) - await service.validate_token_or_user(workspace_id, file_id, token, current_user) - record = await service.get_file_record(workspace_id, file_id) - file_path = service.get_file_path(record) - - if not file_path.exists(): - # deferred validation: raise a consistent error if the file is missing - await service.read_file_bytes(record) - - # use FileResponse directly to reduce memory usage - return FileResponse( - path=file_path, - media_type=record.content_type or "application/octet-stream", - filename=record.original_name, - ) diff --git a/backend/app/api/v1/workspace_folders.py b/backend/app/api/v1/workspace_folders.py deleted file mode 100644 index e9c07aa16..000000000 --- a/backend/app/api/v1/workspace_folders.py +++ /dev/null @@ -1,250 +0,0 @@ -"""Folders API (versioned path: /api/v1/folders)""" - -import uuid -from typing import Optional - -from fastapi import APIRouter, Depends, Query -from pydantic import BaseModel, Field -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import get_current_user -from app.common.response import success_response -from app.core.database import get_db -from app.models.auth import AuthUser as User -from app.repositories.workspace_folder import WorkflowFolderRepository -from app.services.workspace_folder_service import FolderService - -router = APIRouter(prefix="/v1/folders", tags=["Folders"]) - - -class CreateFolderRequest(BaseModel): - workspaceId: uuid.UUID - name: str = Field(..., min_length=1, max_length=255) - parentId: Optional[uuid.UUID] = None - color: Optional[str] = Field(default=None, max_length=32) - - -class UpdateFolderRequest(BaseModel): - workspaceId: Optional[uuid.UUID] = None - name: Optional[str] = Field(None, min_length=1, max_length=255) - color: Optional[str] = Field(None, max_length=32) - isExpanded: Optional[bool] = None - parentId: Optional[uuid.UUID] = None - - -class DuplicateFolderRequest(BaseModel): - name: str = Field(..., min_length=1, max_length=255) - workspaceId: Optional[uuid.UUID] = None - parentId: Optional[uuid.UUID] = None - color: Optional[str] = Field(default=None, max_length=32) - - -def _serialize_folder(folder) -> dict: - return { - "id": str(folder.id), - "name": folder.name, - "workspaceId": str(folder.workspace_id), - "parentId": str(folder.parent_id) if folder.parent_id 
else None, - "color": folder.color, - "isExpanded": folder.is_expanded, - "sortOrder": folder.sort_order, - "createdAt": folder.created_at, - "updatedAt": folder.updated_at, - "userId": str(folder.user_id), - } - - -@router.get("") -async def list_folders( - workspace_id: uuid.UUID = Query(..., alias="workspaceId"), - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - service = FolderService(db) - folders = await service.list_folders(workspace_id, current_user=current_user) - payload = [_serialize_folder(f) for f in folders] - base = success_response(data={"folders": payload}, message="Fetched folders") - return {**base, "folders": payload} - - -@router.post("") -async def create_folder( - body: CreateFolderRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - service = FolderService(db) - folder = await service.create_folder( - workspace_id=body.workspaceId, - current_user=current_user, - name=body.name, - parent_id=body.parentId, - color=body.color, - is_expanded=False, - ) - payload = _serialize_folder(folder) - base = success_response(data={"folder": payload}, message="Folder created") - return {**base, "folder": payload} - - -@router.put("/{folder_id}") -async def update_folder( - folder_id: uuid.UUID, - body: UpdateFolderRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - repo = WorkflowFolderRepository(db) - existing = await repo.get(folder_id) - if not existing: - from app.common.exceptions import NotFoundException - - raise NotFoundException("Folder not found") - - workspace_id = body.workspaceId or existing.workspace_id - service = FolderService(db) - folder = await service.update_folder( - folder_id, - workspace_id=workspace_id, - current_user=current_user, - name=body.name, - color=body.color, - is_expanded=body.isExpanded, - parent_id=body.parentId, - ) - payload = _serialize_folder(folder) - base = success_response(data={"folder": payload}, message="Folder updated") - return {**base, "folder": payload} - - -@router.delete("/{folder_id}") -async def delete_folder( - folder_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - repo = WorkflowFolderRepository(db) - existing = await repo.get(folder_id) - if not existing: - from app.common.exceptions import NotFoundException - - raise NotFoundException("Folder not found") - - service = FolderService(db) - stats = await service.delete_folder_tree( - folder_id, - workspace_id=existing.workspace_id, - current_user=current_user, - ) - base = success_response(data={"deletedItems": stats}, message="Folder deleted") - return {**base, "success": True, "deletedItems": stats} - - -@router.post("/{folder_id}/duplicate") -async def duplicate_folder( - folder_id: uuid.UUID, - body: DuplicateFolderRequest, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - repo = WorkflowFolderRepository(db) - source = await repo.get(folder_id) - if not source: - from app.common.exceptions import NotFoundException - - raise NotFoundException("Source folder not found") - - target_workspace_id = body.workspaceId or source.workspace_id - - service = FolderService(db) - new_root = await service.duplicate_folder( - folder_id, - workspace_id=target_workspace_id, - current_user=current_user, - name=body.name, - parent_id=body.parentId, - color=body.color, - ) - - result = { - "id": str(new_root.id), - "name": new_root.name, - 
"color": new_root.color, - "workspaceId": str(new_root.workspace_id), - "parentId": str(new_root.parent_id) if new_root.parent_id else None, - } - base = success_response(data=result, message="Folder duplicated", code=201) - return {**base, **result} - - -@router.get("/{folder_id}/graphs") -async def list_folder_graphs( - folder_id: uuid.UUID, - db: AsyncSession = Depends(get_db), - current_user: User = Depends(get_current_user), -): - """List all graphs in a folder.""" - from sqlalchemy import func, select - - from app.models.graph import AgentGraph, GraphNode - from app.repositories.graph import GraphRepository - - # verify folder exists and get workspace_id - repo = WorkflowFolderRepository(db) - folder = await repo.get(folder_id) - if not folder: - from app.common.exceptions import NotFoundException - - raise NotFoundException("Folder not found") - - # verify user permission (read access) - service = FolderService(db) - await service._ensure_permission(folder.workspace_id, current_user, "read") - - # query all graphs in this folder - GraphRepository(db) - stmt = ( - select(AgentGraph) - .where(AgentGraph.folder_id == folder_id, AgentGraph.user_id == current_user.id) - .order_by(AgentGraph.created_at.desc()) - ) - - result = await db.execute(stmt) - graphs = list(result.scalars().all()) - - # batch-query node counts - graph_ids = [graph.id for graph in graphs] - node_counts = {} - if graph_ids: - count_query = ( - select(GraphNode.graph_id, func.count(GraphNode.id).label("count")) - .where(GraphNode.graph_id.in_(graph_ids)) - .group_by(GraphNode.graph_id) - ) - count_result = await db.execute(count_query) - for row in count_result: - node_counts[row.graph_id] = row.count - - # serialize graphs - data = [] - for graph in graphs: - data.append( - { - "id": str(graph.id), - "userId": str(graph.user_id), - "workspaceId": str(graph.workspace_id) if graph.workspace_id else None, - "folderId": str(graph.folder_id) if graph.folder_id else None, - "parentId": str(graph.parent_id) if graph.parent_id else None, - "name": graph.name, - "description": graph.description, - "color": graph.color, - "isDeployed": graph.is_deployed, - "variables": graph.variables or {}, - "createdAt": graph.created_at.isoformat() if graph.created_at else None, - "updatedAt": graph.updated_at.isoformat() if graph.updated_at else None, - "nodeCount": node_counts.get(graph.id, 0), - } - ) - - base = success_response(data={"graphs": data}, message="Fetched graphs") - return {**base, "graphs": data} diff --git a/backend/app/api/v1/workspaces.py b/backend/app/api/v1/workspaces.py index 2ac773a33..394d8c275 100644 --- a/backend/app/api/v1/workspaces.py +++ b/backend/app/api/v1/workspaces.py @@ -7,8 +7,8 @@ from pydantic import BaseModel, EmailStr, Field from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AccessDeniedError, InvalidRequestError from app.common.dependencies import get_current_user, require_workspace_role -from app.common.exceptions import BadRequestException, ForbiddenException from app.common.pagination import PaginationParams from app.core.database import get_db from app.models.auth import AuthUser as User @@ -74,7 +74,11 @@ async def create_workspace( try: workspace_type = WorkspaceType(payload.type) except ValueError: - raise BadRequestException(f"Invalid workspace type: {payload.type}. Must be 'personal' or 'team'") + raise InvalidRequestError( + f"Invalid workspace type: {payload.type}. 
Must be 'personal' or 'team'", + code="WORKSPACE_TYPE_INVALID", + data={"workspace_type": payload.type}, + ) service = WorkspaceService(db) workspace = await service.create_workspace( @@ -220,7 +224,7 @@ async def get_my_permission( role = await service.get_user_role(workspace_id, current_user) if not role: - raise ForbiddenException("No access to workspace") + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") # reuse the frontend's role-to-permission mapping for consistency role_to_permission = { @@ -293,12 +297,16 @@ async def update_member_role( WorkspaceMemberRole.admin, ) if not has_access: - raise ForbiddenException("Insufficient workspace permission") + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") try: new_role = WorkspaceMemberRole(payload.role) except ValueError: - raise BadRequestException(f"Invalid role: {payload.role}") + raise InvalidRequestError( + f"Invalid role: {payload.role}", + code="WORKSPACE_MEMBER_ROLE_INVALID", + data={"role": payload.role}, + ) service = WorkspaceService(db) member = await service.update_member_role( @@ -329,7 +337,7 @@ async def remove_member( WorkspaceMemberRole.admin, ) if not has_access: - raise ForbiddenException("Insufficient workspace permission") + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED") service = WorkspaceService(db) await service.remove_member(
diff --git a/backend/app/common/app_errors.py b/backend/app/common/app_errors.py new file mode 100644 index 000000000..143dd6169 --- /dev/null +++ b/backend/app/common/app_errors.py @@ -0,0 +1,302 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Mapping + + +def _normalize_data(data: Mapping[str, Any] | None) -> dict[str, Any] | None: + if data is None: + return None + return dict(data) + + +@dataclass(slots=True) +class AppError(Exception): + code: str + message: str + data: Mapping[str, Any] | None = None + source: str = "internal" + retryable: bool = False + user_action: str | None = None + detail: str | None = None + + def __post_init__(self) -> None: + Exception.__init__(self, self.message) + self.data = _normalize_data(self.data) + + def to_payload(self) -> dict[str, Any]: + result: dict[str, Any] = { + "code": self.code, + "message": self.message, + "data": dict(self.data) if self.data is not None else None, + "source": self.source, + "retryable": self.retryable, + } + if self.user_action is not None: + result["user_action"] = self.user_action + if self.detail is not None: + result["detail"] = self.detail + return result + + +class DomainError(AppError): + _default_source: str = "api" + + +class InfraError(AppError): + _default_source: str = "runtime" + + +class AuthError(AppError): + _default_source: str = "auth" + + +class ValidationError(AppError): + _default_source: str = "validation" + + +class PermissionDeniedError(AppError): + _default_source: str = "permission" + + +class ConflictError(AppError): + _default_source: str = "api" + + +class RateLimitError(AppError): + _default_source: str = "api" + + +class InternalError(AppError): + _default_source: str = "internal" + + +class NotFoundError(DomainError): + def __init__( + self, + message: str = "Resource not found", + *, + code: str = "NOT_FOUND", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = None, + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class InvalidRequestError(DomainError): + def __init__( + self, + message: str = "Bad request", + *, + code: str = "BAD_REQUEST", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = None, + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class AuthenticationError(AuthError): + def __init__( + self, + message: str = "Unauthorized", + *, + code: str = "UNAUTHORIZED", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = "relogin", + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class AccessDeniedError(PermissionDeniedError): + def __init__( + self, + message: str = "Forbidden", + *, + code: str = "FORBIDDEN", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = None, + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class ResourceConflictError(ConflictError): + def __init__( + self, + message: str = "Resource conflict", + *, + code: str = "CONFLICT", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = None, + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class RateLimitExceededError(RateLimitError): + def __init__( + self, + message: str = "Too many requests", + *, + code: str = "RATE_LIMITED", + data: Mapping[str, Any] | None = None, + retryable: bool = True, + user_action: str | None = "retry", + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class InternalServiceError(InternalError): + def __init__( + self, + message: str = "Internal error", + *, + code: str = "INTERNAL_ERROR", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = None, + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class ServiceUnavailableError(InfraError): + def __init__( + self, + message: str = "Service temporarily unavailable", + *, + code: str = "SERVICE_UNAVAILABLE", + data: Mapping[str, Any] | None = None, + retryable: bool = True, + user_action: str | None = "retry", + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class ClientClosedError(AppError): + _default_source: str = "api" + + def __init__( + self, + message: str = "Client has closed the connection", + *, + code: str = "CLIENT_CLOSED", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = None, + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class RequestValidationAppError(ValidationError): + def __init__( + self, + message: str = "Request parameter validation failed", + *, + code: str = "REQUEST_VALIDATION_ERROR", + data: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = "fix_input", + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=data, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +class ModelConfigError(DomainError): + MODEL_NOT_FOUND = "MODEL_NOT_FOUND" + MODEL_NO_CREDENTIALS = "MODEL_NO_CREDENTIALS" + PROVIDER_NOT_FOUND = "PROVIDER_NOT_FOUND" + MODEL_NAME_REQUIRED = "MODEL_NAME_REQUIRED" + BUILD_COPILOT_MODEL_REQUIRED = "BUILD_COPILOT_MODEL_REQUIRED" + + def __init__( + self, + code: str, + message: str = "Model configuration error", + *, + params: Mapping[str, Any] | None = None, + retryable: bool = False, + user_action: str | None = "configure_model", + detail: str | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__( + code=code, message=message, data=params, retryable=retryable, user_action=user_action, detail=detail, **kw + ) + + +def normalize_app_error( + exc: Exception, + *, + default_code: str = "INTERNAL_ERROR", + default_message: str = "Internal error", + default_data: Mapping[str, Any] | None = None, + source: str = "internal", + retryable: bool = False, +) -> AppError: + if isinstance(exc, AppError): + return exc + return InternalServiceError( + default_message, + code=default_code, + source=source, + retryable=retryable, + data={ + **(dict(default_data) if default_data is not None else {}), + "detail": str(exc), + } + if str(exc) + else (dict(default_data) if default_data is not None else None), + )
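Since `app_errors.py` becomes the single entry point for error semantics, a quick illustration of how an error serializes may help. This sketch is not part of the diff; the expected output follows `to_payload()` above (the optional `user_action`/`detail` keys are omitted when unset). HTTP status selection happens separately in `exceptions.py` (see `_status_code_for_error` below), which maps `NotFoundError` to 404 and routes wrapped unknown exceptions through the 500 path.

```python
# Illustrative usage of the new error taxonomy (not part of the diff).
from app.common.app_errors import NotFoundError, normalize_app_error

err = NotFoundError(
    "Task not found",
    code="TASK_NOT_FOUND",
    data={"task_id": "123e4567-e89b-12d3-a456-426614174000"},
)
print(err.to_payload())
# {'code': 'TASK_NOT_FOUND', 'message': 'Task not found',
#  'data': {'task_id': '123e4567-e89b-12d3-a456-426614174000'},
#  'source': 'api', 'retryable': False}
# (source is 'api' because NotFoundError is a DomainError)

# Unknown exceptions are wrapped rather than leaked to the client:
wrapped = normalize_app_error(ValueError("boom"))
print(wrapped.code, wrapped.data)  # INTERNAL_ERROR {'detail': 'boom'}
```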
diff --git a/backend/app/common/auth_dependency.py b/backend/app/common/auth_dependency.py index 08b4f56a8..472d72ec6 100644 --- a/backend/app/common/auth_dependency.py +++ b/backend/app/common/auth_dependency.py @@ -11,8 +11,8 @@ from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AuthenticationError from app.common.dependencies import get_current_user -from app.common.exceptions import UnauthorizedException from app.core.database import get_db from app.models.auth import AuthUser as User from app.models.platform_token import PlatformToken @@ -67,9 +67,9 @@ async def get_current_user_or_token( # Fall through to existing session/JWT auth if request is None: - from app.common.exceptions import UnauthorizedException + from app.common.app_errors import AuthenticationError - raise UnauthorizedException("Authentication required") + raise AuthenticationError("Authentication required", code="AUTH_REQUIRED") user = await get_current_user(token=token, request=request, db=db) return AuthContext(user=user, token_scopes=None) @@ -85,13 +85,13 @@ async def _authenticate_platform_token( pt = result.scalar_one_or_none() if not pt: - raise UnauthorizedException("Invalid API token") + raise AuthenticationError("Invalid API token", code="API_TOKEN_INVALID") if not pt.is_active: - raise UnauthorizedException("API token has been revoked") + raise AuthenticationError("API token has been revoked", code="API_TOKEN_REVOKED") if pt.expires_at and pt.expires_at < datetime.now(timezone.utc): - raise UnauthorizedException("API token has expired") + raise AuthenticationError("API token has expired", code="API_TOKEN_EXPIRED") # Debounce last_used_at update now = datetime.now(timezone.utc) @@ -105,7 +105,7 @@ async def _authenticate_platform_token( user_result = await db.execute(select(User).where(User.id == pt.user_id)) user = user_result.scalar_one_or_none() if not user or not user.is_active: - raise UnauthorizedException("Token owner account is inactive") + raise AuthenticationError("Token owner account is inactive", code="API_TOKEN_OWNER_INACTIVE") return AuthContext( user=user,
diff --git a/backend/app/common/dependencies.py b/backend/app/common/dependencies.py index 9a5cf9331..760599365 100644 --- a/backend/app/common/dependencies.py +++ b/backend/app/common/dependencies.py @@ -11,8 +11,8 @@ from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AccessDeniedError, AuthenticationError, NotFoundError from app.common.cookie_auth import extract_token_from_cookies -from app.common.exceptions import ForbiddenException, NotFoundException, UnauthorizedException from app.core.database import get_db from app.core.security import decode_token from app.models.auth import AuthUser as User @@ -45,7 +45,7 @@ async def get_current_user( logger.debug("Failed to read auth token from cookies", exc_info=True) token = token or cookie_token if not token: - raise UnauthorizedException("Missing credentials") + raise AuthenticationError("Missing credentials", code="MISSING_CREDENTIALS") # try JWT token first (JWT mode) payload = decode_token(token) @@ -54,9 +54,9 @@ result = await db.execute(select(User).where(User.id == str(user_id))) user = result.scalar_one_or_none() if user is None: - raise UnauthorizedException("User not found") + raise AuthenticationError("User not found", code="USER_NOT_FOUND") if not user.is_active: - raise UnauthorizedException("User is inactive") + raise AuthenticationError("User is inactive", code="USER_INACTIVE") return user # if JWT validation fails, try as session token (backward-compatible) @@ -66,12 +66,12 @@ result = await db.execute(select(User).where(User.id == session.user_id)) user = result.scalar_one_or_none() if user is None: - raise UnauthorizedException("User not found") + raise AuthenticationError("User not found", code="USER_NOT_FOUND") if not user.is_active: - raise UnauthorizedException("User is inactive") + raise AuthenticationError("User is inactive", code="USER_INACTIVE") return user - raise UnauthorizedException("Could not validate credentials") + raise AuthenticationError("Could not validate credentials", code="CREDENTIALS_INVALID") async def get_current_user_optional( @@ -148,17 +148,17 @@ async def checker( workspace = await ws_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError("Workspace not found", code="WORKSPACE_NOT_FOUND") if workspace.owner_id == current_user.id: return current_user member = await member_repo.get_member(workspace_id, current_user.id) if not member: - raise ForbiddenException("No access to workspace") + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") if _role_rank(member.role) < _role_rank(min_role): - raise ForbiddenException("Insufficient workspace permission") + raise AccessDeniedError("Insufficient workspace permission", code="WORKSPACE_PERMISSION_DENIED")
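+    # Owner returns early above; reaching here means the member's role rank meets min_role.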
return current_user @@ -195,9 +195,9 @@ async def checker( ) member = result.scalar_one_or_none() if not member: - raise ForbiddenException("No access to organization") + raise AccessDeniedError("No access to organization", code="ORGANIZATION_ACCESS_DENIED") if _rank(member.role) < _rank(min_role): - raise ForbiddenException("Insufficient organization permission") + raise AccessDeniedError("Insufficient organization permission", code="ORGANIZATION_PERMISSION_DENIED") return current_user return Depends(checker) diff --git a/backend/app/common/exceptions.py b/backend/app/common/exceptions.py index 5a4dfbc12..7a4026602 100644 --- a/backend/app/common/exceptions.py +++ b/backend/app/common/exceptions.py @@ -1,215 +1,101 @@ -""" -Unified exception system (single entry point). - -- Exception classes: all inherit from `AppException(HTTPException)`, supporting separate - `status_code` (HTTP) and `code` (business/error code), with `data` for extra error details. -- Global handlers: provide FastAPI exception handler functions and a one-call registration - function `register_exception_handlers`, ensuring the unified response format defined by - `app.common.response.error_response`. -""" - from __future__ import annotations -from typing import Any, Dict, Iterable, List, Mapping, Optional +from typing import Any, Iterable, Mapping from fastapi import HTTPException, Request, status from fastapi.exceptions import RequestValidationError from fastapi.responses import JSONResponse, Response from pydantic import ValidationError as PydanticValidationError +from app.common.app_errors import ( + AppError, + AuthError, + ClientClosedError, + ConflictError, + DomainError, + InfraError, + InternalError, + InternalServiceError, + NotFoundError, + PermissionDeniedError, + RateLimitError, + RequestValidationAppError, + ValidationError, + normalize_app_error, +) from app.common.response import error_response -class AppException(HTTPException): - """Base application exception (recommended for all business code).""" - - code: int - data: Any - - def __init__( - self, - status_code: int = status.HTTP_500_INTERNAL_SERVER_ERROR, - message: str = "Internal Server Error", - *, - code: int | None = None, - data: Any = None, - headers: Optional[Dict[str, str]] = None, - ): - super().__init__(status_code=status_code, detail=message, headers=headers) - self.code = status_code if code is None else code - self.data = data - - -# Common HTTP exceptions (raise directly from business code) - - -class NotFoundException(AppException): - """Resource not found (404).""" - - def __init__(self, message: str = "Resource not found", *, code: int | None = None, data: Any = None): - super().__init__(status_code=status.HTTP_404_NOT_FOUND, message=message, code=code, data=data) - - -class ModelConfigError(AppException): - """Model configuration error with structured error_code + params for frontend i18n. - - error_code: Frontend i18n key (e.g. MODEL_NOT_FOUND, MODEL_NO_CREDENTIALS) - params: Interpolation params (e.g. 
{model: "gpt-4o", provider: "openai"}) - message: English fallback (shown when frontend has no i18n key) - """ - - # Error code constants — shared with frontend i18n keys - MODEL_NOT_FOUND = "MODEL_NOT_FOUND" - MODEL_NO_CREDENTIALS = "MODEL_NO_CREDENTIALS" - PROVIDER_NOT_FOUND = "PROVIDER_NOT_FOUND" - MODEL_NAME_REQUIRED = "MODEL_NAME_REQUIRED" - - error_code: str - params: Dict[str, Any] - - def __init__( - self, - error_code: str, - message: str = "Model configuration error", - *, - params: Dict[str, Any] | None = None, - ): - self.error_code = error_code - self.params = params or {} - super().__init__( - status_code=status.HTTP_400_BAD_REQUEST, - message=message, - data={"error_code": error_code, "params": self.params}, - ) - - -class BadRequestException(AppException): - """Bad request (400).""" - - def __init__(self, message: str = "Bad request", *, code: int | None = None, data: Any = None): - super().__init__(status_code=status.HTTP_400_BAD_REQUEST, message=message, code=code, data=data) - - -class UnauthorizedException(AppException): - """Unauthorized (401).""" - - def __init__(self, message: str = "Unauthorized", *, code: int | None = None, data: Any = None): - super().__init__( - status_code=status.HTTP_401_UNAUTHORIZED, - message=message, - code=code, - data=data, - headers={"WWW-Authenticate": "Bearer"}, - ) - - -class ForbiddenException(AppException): - """Forbidden (403).""" - - def __init__(self, message: str = "Forbidden", *, code: int | None = None, data: Any = None): - super().__init__(status_code=status.HTTP_403_FORBIDDEN, message=message, code=code, data=data) - - -class ValidationException(AppException): - """Request validation failed (422).""" - - def __init__(self, message: str = "Validation error", *, code: int | None = None, data: Any = None): - super().__init__(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, message=message, code=code, data=data) - - -class ConflictException(AppException): - """Resource conflict (409).""" - - def __init__(self, message: str = "Resource conflict", *, code: int | None = None, data: Any = None): - super().__init__(status_code=status.HTTP_409_CONFLICT, message=message, code=code, data=data) - - -class TooManyRequestsException(AppException): - """Too many requests (429).""" - - def __init__(self, message: str = "Too many requests", *, code: int | None = None, data: Any = None): - super().__init__(status_code=status.HTTP_429_TOO_MANY_REQUESTS, message=message, code=code, data=data) - - -class InternalServerException(AppException): - """Internal server error (500).""" - - def __init__(self, message: str = "Internal Server Error", *, code: int | None = 1007, data: Any = None): - super().__init__(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, message=message, code=code, data=data) - - -class ClientClosedException(AppException): - """Client disconnected early (499).""" - - def __init__(self, message: str = "Client has closed the connection", *, code: int | None = 1008, data: Any = None): - # 499 is a non-standard HTTP status code, but some gateways/logging systems use it - super().__init__(status_code=499, message=message, code=code, data=data) - - -class BusinessLogicException(BadRequestException): - """Business logic error (default 400, business code default 1006).""" - - def __init__(self, message: str, *, code: int | None = 1006, data: Any = None): - super().__init__(message=message, code=code, data=data) - - -class ParameterValidationException(BadRequestException): - """Parameter/business validation error (default 400, business 
code default 1001).""" - - def __init__(self, message: str, *, code: int | None = 1001, data: Any = None): - super().__init__(message=message, code=code, data=data) - - -# Aliases - -# Authentication -> 401, Authorization -> 403 -AuthenticationException = UnauthorizedException -AuthorizationException = ForbiddenException -ResourceNotFoundException = NotFoundException -ResourceConflictException = ConflictException - - -# Unified error response construction & global exception handlers - - -def create_error_response(*, status_code: int, code: int, message: str, data: Any = None) -> Response: - """Build a unified error response (conforming to app.common.response.error_response).""" +def create_error_response( + *, + status_code: int, + error: AppError, + headers: Mapping[str, str] | None = None, +) -> Response: return JSONResponse( status_code=status_code, - content=error_response(message=message, code=code, data=data), + content=error_response(error.to_payload()), + headers=dict(headers) if headers else None, ) -async def app_exception_handler(request: Request, exc: AppException) -> Response: - """Handle application exceptions (AppException).""" - code_value = getattr(exc, "code", exc.status_code) - code = code_value if isinstance(code_value, int) else exc.status_code +def _status_code_for_error(error: AppError) -> int: + if isinstance(error, ClientClosedError): + return 499 + if isinstance(error, NotFoundError): + return status.HTTP_404_NOT_FOUND + if isinstance(error, AuthError): + return status.HTTP_401_UNAUTHORIZED + if isinstance(error, PermissionDeniedError): + return status.HTTP_403_FORBIDDEN + if isinstance(error, RequestValidationAppError): + return status.HTTP_422_UNPROCESSABLE_CONTENT + if isinstance(error, ValidationError): + return status.HTTP_400_BAD_REQUEST + if isinstance(error, ConflictError): + return status.HTTP_409_CONFLICT + if isinstance(error, RateLimitError): + return status.HTTP_429_TOO_MANY_REQUESTS + if isinstance(error, InternalError | InfraError): + return status.HTTP_500_INTERNAL_SERVER_ERROR + if isinstance(error, DomainError): + return status.HTTP_400_BAD_REQUEST + return status.HTTP_500_INTERNAL_SERVER_ERROR + + +def _headers_for_error(error: AppError) -> dict[str, str] | None: + if isinstance(error, AuthError): + return {"WWW-Authenticate": "Bearer"} + return None + + +async def app_error_handler(request: Request, exc: AppError) -> Response: return create_error_response( - status_code=exc.status_code, - code=code, - message=str(exc.detail), - data=getattr(exc, "data", None), + status_code=_status_code_for_error(exc), + error=exc, + headers=_headers_for_error(exc), ) async def http_exception_handler(request: Request, exc: HTTPException) -> Response: - """Handle FastAPI/Starlette HTTPException (non-AppException).""" + error = AppError( + code=str(exc.status_code), + message=str(exc.detail), + data=None, + ) return create_error_response( status_code=exc.status_code, - code=exc.status_code, - message=str(exc.detail), - data=getattr(exc, "data", None), + error=error, + headers=exc.headers, ) -def _format_validation_errors(errors: Iterable[Mapping[str, Any]]) -> List[dict[str, Any]]: - formatted: List[dict[str, Any]] = [] +def _format_validation_errors(errors: Iterable[Mapping[str, Any]]) -> list[dict[str, Any]]: + formatted: list[dict[str, Any]] = [] for err in errors: - loc = err.get("loc", ()) - field_path = ".".join(str(x) for x in loc) formatted.append( { - "field": field_path, + "field": ".".join(str(part) for part in err.get("loc", ())), "message": 
err.get("msg"), "type": err.get("type"), } @@ -218,88 +104,35 @@ def _format_validation_errors(errors: Iterable[Mapping[str, Any]]) -> List[dict[ async def request_validation_exception_handler(request: Request, exc: Exception) -> Response: - """Handle request validation exceptions (RequestValidationError / PydanticValidationError).""" - errors: List[dict[str, Any]] = [] + errors: list[dict[str, Any]] = [] if isinstance(exc, (RequestValidationError, PydanticValidationError)): errors = _format_validation_errors(exc.errors()) - return create_error_response( - status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, - code=status.HTTP_422_UNPROCESSABLE_ENTITY, - message="Request parameter validation failed", - data={"validation_errors": errors} if errors else None, - ) + error = RequestValidationAppError(data={"errors": errors}) + return await app_error_handler(request, error) async def general_exception_handler(request: Request, exc: Exception) -> Response: - """Handle uncaught exceptions (500).""" + if isinstance(exc, AppError): + return await app_error_handler(request, exc) + try: from loguru import logger logger.exception("Unhandled exception: {}", exc) except Exception: - # fallback when logger is unavailable pass - debug = False - try: - from app.core.settings import settings - - debug = bool(getattr(settings, "debug", False)) - except Exception: - debug = False - - return create_error_response( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - code=status.HTTP_500_INTERNAL_SERVER_ERROR, - message=str(exc) if debug else "Internal Server Error", - data={"error_type": type(exc).__name__} if debug else None, - ) + return await app_error_handler(request, InternalServiceError()) def register_exception_handlers(app: Any) -> None: - """ - Register all exception handlers on the FastAPI app in one call. - - Note: keep this function free of hard FastAPI type dependencies to avoid circular imports. 
- """ - app.add_exception_handler(AppException, app_exception_handler) + app.add_exception_handler(AppError, app_error_handler) app.add_exception_handler(HTTPException, http_exception_handler) app.add_exception_handler(RequestValidationError, request_validation_exception_handler) app.add_exception_handler(PydanticValidationError, request_validation_exception_handler) app.add_exception_handler(Exception, general_exception_handler) -# Convenience raise_* helpers - - -def raise_validation_error(message: str, data: Any = None) -> None: - raise ParameterValidationException(message, code=1001, data=data) - - -def raise_auth_error(message: str = "Authentication failed, please sign in again", data: Any = None) -> None: - raise UnauthorizedException(message, code=1002, data=data) - - -def raise_permission_error(message: str = "Insufficient permissions", data: Any = None) -> None: - raise ForbiddenException(message, code=1003, data=data) - - -def raise_not_found_error(resource: str, data: Any = None) -> None: - raise NotFoundException(f"{resource} not found", code=1004, data=data) - - -def raise_conflict_error(message: str, data: Any = None) -> None: - raise ConflictException(message, code=1005, data=data) - - -def raise_client_closed_error(message: str = "Client has closed the connection", data: Any = None) -> None: - raise ClientClosedException(message, code=1008, data=data) - - -def raise_business_error(message: str, data: Any = None) -> None: - raise BusinessLogicException(message, code=1006, data=data) - - -def raise_internal_error(message: str = "Internal server error", data: Any = None) -> None: - raise InternalServerException(message, code=1007, data=data) +def normalize_exception(exc: Exception) -> AppError: + return normalize_app_error(exc) diff --git a/backend/app/common/logging.py b/backend/app/common/logging.py index c208194c1..433a9a8be 100644 --- a/backend/app/common/logging.py +++ b/backend/app/common/logging.py @@ -8,11 +8,9 @@ import logging import os import time -from collections.abc import Callable -from fastapi import Request, Response from loguru import logger -from starlette.middleware.base import BaseHTTPMiddleware +from starlette.types import ASGIApp, Message, Receive, Scope, Send from app.core.trace_context import get_trace_id, set_trace_id @@ -35,45 +33,65 @@ def emit(self, record): logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage()) -class LoggingMiddleware(BaseHTTPMiddleware): - """HTTP request logging middleware.""" +class LoggingMiddleware: + """Pure ASGI logging middleware (avoids BaseHTTPMiddleware re-raise issues).""" + + def __init__(self, app: ASGIApp) -> None: + self.app = app + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] != "http": + await self.app(scope, receive, send) + return - async def dispatch(self, request: Request, call_next: Callable) -> Response: - """Process the request and log details.""" start_time = time.time() - method = request.method - path = request.url.path - client_host = request.client.host if request.client else "unknown" + method = scope.get("method", "-") + path = scope.get("path", "-") + client = scope.get("client") + client_host = client[0] if client else "unknown" + + request_id = None + for key, value in scope.get("headers", []): + if key == b"x-request-id": + request_id = value.decode() + break + trace_id = set_trace_id(request_id) - trace_id = set_trace_id(request.headers.get("X-Request-ID") or None) - request.state.trace_id = trace_id log = 
logger.bind(trace_id=trace_id, method=method, path=path, client=client_host) - log.info("request.start") - try: - response = await call_next(request) - - process_time = time.time() - start_time - status_code = response.status_code - message = f"request.completed status={status_code} duration={process_time:.3f}s" - - if status_code >= 500: - log.error(message) - elif status_code >= 400: - log.warning(message) - else: - log.info(message) - - response.headers["X-Process-Time"] = str(process_time) - response.headers["X-Trace-Id"] = trace_id - return response + status_code = 500 + response_process_time = 0.0 + + async def send_wrapper(message: Message) -> None: + nonlocal status_code, response_process_time + if message["type"] == "http.response.start": + status_code = message["status"] + response_process_time = time.time() - start_time + if "headers" not in message: + message["headers"] = [] + message["headers"] = list(message["headers"]) + [ + [b"x-process-time", str(response_process_time).encode()], + [b"x-trace-id", trace_id.encode()], + ] + await send(message) + try: + await self.app(scope, receive, send_wrapper) except Exception as e: process_time = time.time() - start_time log.opt(exception=True).error(f"request.failed duration={process_time:.3f}s error={type(e).__name__}") raise + process_time = response_process_time or (time.time() - start_time) + message = f"request.completed status={status_code} duration={process_time:.3f}s" + if status_code >= 500: + log.error(message) + elif status_code >= 400: + log.warning(message) + else: + log.info(message) + def setup_logging(): """ diff --git a/backend/app/common/response.py b/backend/app/common/response.py index ba9cd1127..02522daf9 100644 --- a/backend/app/common/response.py +++ b/backend/app/common/response.py @@ -1,6 +1,4 @@ -""" -Unified response format. -""" +"""Unified response format.""" from typing import Any, Generic, List, Optional, TypeVar @@ -51,19 +49,9 @@ def success_response( } -def error_response( - message: str = "Error", - code: int = 400, - data: Any = None, -) -> dict: - """Build an error response.""" - return { - "success": False, - "code": code, - "message": message, - "data": data, - "timestamp": utc_now().isoformat() + "Z", - } +def error_response(error: dict[str, Any]) -> dict: + """Build a canonical error response.""" + return dict(error) def paginated_response( diff --git a/backend/app/common/skill_permissions.py b/backend/app/common/skill_permissions.py index a9e749924..d12cb39cc 100644 --- a/backend/app/common/skill_permissions.py +++ b/backend/app/common/skill_permissions.py @@ -7,7 +7,7 @@ from sqlalchemy import and_, select from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import ForbiddenException +from app.common.app_errors import AccessDeniedError from app.common.permissions import check_token_permission from app.models.skill import Skill from app.models.skill_collaborator import CollaboratorRole, SkillCollaborator @@ -44,7 +44,7 @@ async def check_skill_access( """ Unified permission check. - Raises ForbiddenException if the user lacks sufficient access. + Raises AccessDeniedError if the user lacks sufficient access. """ # 1. 
Superuser bypass if is_superuser: @@ -67,7 +67,11 @@ async def check_skill_access( _check_token_scope(token_scopes, required_scope, str(skill.id), token_resource_type, token_resource_id) return - raise ForbiddenException("You don't have permission to access this skill") + raise AccessDeniedError( + "You don't have permission to access this skill", + code="SKILL_ACCESS_DENIED", + data={"skill_id": str(skill.id), "user_id": user_id, "min_role": min_role.value}, + ) def _check_token_scope( @@ -88,4 +92,8 @@ def _check_token_scope( token_resource_id=str(token_resource_id) if token_resource_id else None, ) if not has_permission: - raise ForbiddenException(f"Token missing required scope or resource binding: {required_scope}") + raise AccessDeniedError( + f"Token missing required scope or resource binding: {required_scope}", + code="SKILL_TOKEN_SCOPE_FORBIDDEN", + data={"skill_id": skill_id, "required_scope": required_scope}, + ) diff --git a/backend/app/common/stream_errors.py b/backend/app/common/stream_errors.py new file mode 100644 index 000000000..fe3a229a2 --- /dev/null +++ b/backend/app/common/stream_errors.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +import json +from typing import Any, Mapping + + +def stream_error_event( + *, + code: str, + message: str, + data: Mapping[str, Any] | None = None, + source: str = "internal", + retryable: bool = False, + user_action: str | None = None, +) -> str: + payload: dict[str, Any] = { + "code": code, + "message": message, + "data": dict(data) if data is not None else None, + "source": source, + "retryable": retryable, + } + if user_action is not None: + payload["user_action"] = user_action + return f"event: error\ndata: {json.dumps(payload)}\n\n" diff --git a/backend/app/core/agent/artifacts/collector.py b/backend/app/core/agent/artifacts/collector.py index 2fdee4f6a..94a09d361 100644 --- a/backend/app/core/agent/artifacts/collector.py +++ b/backend/app/core/agent/artifacts/collector.py @@ -9,13 +9,13 @@ import json import mimetypes -import os from datetime import datetime, timezone from pathlib import Path from typing import Any from loguru import logger +from app.core.settings import settings from app.utils.path_utils import sanitize_path_component MANIFEST_FILENAME = "_manifest.json" @@ -26,7 +26,7 @@ def _default_artifacts_root() -> Path: def resolve_artifacts_root() -> Path: - env = os.getenv("AGENT_ARTIFACTS_ROOT", "").strip() + env = (settings.agent_artifacts_root or "").strip() if env: return Path(env).expanduser().resolve() return _default_artifacts_root().resolve() diff --git a/backend/app/core/agent/backends/pydantic_adapter.py b/backend/app/core/agent/backends/pydantic_adapter.py index ea523b370..1f00269f2 100644 --- a/backend/app/core/agent/backends/pydantic_adapter.py +++ b/backend/app/core/agent/backends/pydantic_adapter.py @@ -29,6 +29,7 @@ from loguru import logger from pydantic_ai_backends import DockerSandbox +from app.common.app_errors import ServiceUnavailableError from app.core.agent.backends.constants import ( DEFAULT_AUTO_REMOVE, DEFAULT_COMMAND_TIMEOUT, @@ -44,6 +45,7 @@ list_builtin_runtimes, resolve_runtime, ) +from app.core.agent.backends.utils.command_executor import DANGEROUS_RE from app.utils.backend_utils import create_execute_response # Re-export for backward compatibility @@ -234,7 +236,11 @@ def __init__( logger.info(f"DockerSandbox created: id={self._id}, image={self.image}") except Exception as e: logger.error(f"Failed to create DockerSandbox for adapter {self._id}: {e}", exc_info=True) - raise 
RuntimeError(f"Failed to create DockerSandbox: {e}") from e + raise ServiceUnavailableError( + "Failed to create Docker sandbox", + code="DOCKER_SANDBOX_CREATE_FAILED", + data={"sandbox_id": self._id, "image": self.image, "detail": str(e)}, + ) from e # Start the sandbox logger.debug(f"Starting sandbox {self._id}...") @@ -366,24 +372,11 @@ def start(self) -> None: logger.info(f"Sandbox {self._id} started (image={self.image})") except Exception as e: logger.error(f"Failed to start sandbox {self._id}: {e}") - raise RuntimeError(f"Failed to start sandbox {self._id}: {e}") from e - - # Dangerous command patterns (defense-in-depth, not sole security boundary) - _DANGEROUS_PATTERNS = [ - r"rm\s+-rf\s+/\s*$", # rm -rf / - r"mkfs\.", # format disk - r"dd\s+.*of=/dev/", # write to device - r":\(\)\s*\{", # fork bomb :(){ :|:& };: - ] - _DANGEROUS_RE = None # Lazy-compiled combined regex - - @classmethod - def _get_dangerous_re(cls): - if cls._DANGEROUS_RE is None: - import re - - cls._DANGEROUS_RE = re.compile("|".join(f"(?:{p})" for p in cls._DANGEROUS_PATTERNS)) - return cls._DANGEROUS_RE + raise ServiceUnavailableError( + "Failed to start Docker sandbox", + code="DOCKER_SANDBOX_START_FAILED", + data={"sandbox_id": self._id, "image": self.image, "detail": str(e)}, + ) from e def _exec_command(self, command: str) -> tuple[str, int]: """Execute command in sandbox with safety checks. @@ -394,7 +387,7 @@ def _exec_command(self, command: str) -> tuple[str, int]: Returns: Tuple of (output, exit_code) """ - if self._get_dangerous_re().search(command): + if DANGEROUS_RE.search(command): logger.warning(f"[{self._id}] Blocked dangerous command: {command[:100]}") return "Error: command blocked by security policy", 1 diff --git a/backend/app/core/agent/backends/utils/command_executor.py b/backend/app/core/agent/backends/utils/command_executor.py index 6195772db..7f6c48c4c 100644 --- a/backend/app/core/agent/backends/utils/command_executor.py +++ b/backend/app/core/agent/backends/utils/command_executor.py @@ -10,6 +10,7 @@ - Output truncation support """ +import re import subprocess from typing import Optional @@ -21,6 +22,16 @@ ) from app.utils.backend_utils import create_execute_response +# Shared denylist for obviously destructive commands. +# Defense-in-depth — shell=True is kept for LLM agent pipe/redirect support. +DANGEROUS_PATTERNS = [ + r"rm\s+-rf\s+/\s*$", # rm -rf / + r"mkfs\.", # format disk + r"dd\s+.*of=/dev/", # write to device + r":\(\)\s*\{", # fork bomb :(){ :|:& };: +] +DANGEROUS_RE = re.compile("|".join(f"(?:{p})" for p in DANGEROUS_PATTERNS)) + def combine_stdout_stderr(stdout: Optional[str], stderr: Optional[str]) -> str: """Combine stdout and stderr into a single output string. @@ -106,6 +117,9 @@ def execute_local_command( total 0 ... """ + if DANGEROUS_RE.search(command): + return create_error_response("Command blocked by security policy") + try: result = subprocess.run( command, diff --git a/backend/app/core/agent/checkpointer/checkpointer.py b/backend/app/core/agent/checkpointer/checkpointer.py index 0ca5adeac..4bdbdb613 100644 --- a/backend/app/core/agent/checkpointer/checkpointer.py +++ b/backend/app/core/agent/checkpointer/checkpointer.py @@ -5,12 +5,13 @@ Centralize all checkpointer logic and provide a unified interface. 
""" -import os from typing import TYPE_CHECKING, Optional from loguru import logger from psycopg_pool import AsyncConnectionPool +from app.common.app_errors import InternalServiceError, ServiceUnavailableError + if TYPE_CHECKING: from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver @@ -63,10 +64,12 @@ async def initialize(cls) -> None: try: db_uri = cls._get_db_uri() + from app.core.settings import settings as _settings + cls._pool = AsyncConnectionPool( conninfo=db_uri, - min_size=int(os.getenv("DB_POOL_MIN_SIZE", 1)), - max_size=int(os.getenv("DB_POOL_MAX_SIZE", 10)), + min_size=_settings.checkpointer_pool_min_size, + max_size=_settings.checkpointer_pool_max_size, kwargs={"autocommit": True, "prepare_threshold": 0}, open=False, # do not auto-open in constructor ) @@ -75,7 +78,7 @@ async def initialize(cls) -> None: cls._initialized = True logger.info( f"CheckpointerManager initialized | " - f"pool_size={os.getenv('DB_POOL_MIN_SIZE', 1)}-{os.getenv('DB_POOL_MAX_SIZE', 10)}" + f"pool_size={_settings.checkpointer_pool_min_size}-{_settings.checkpointer_pool_max_size}" ) # initialize database table schema @@ -103,7 +106,11 @@ async def _init_db(cls) -> None: Exception: If table creation fails. """ if not cls._pool: - raise RuntimeError("Pool not initialized. Call initialize() first.") + raise InternalServiceError( + "Checkpointer pool is not initialized", + code="CHECKPOINTER_POOL_UNINITIALIZED", + data={"operation": "setup"}, + ) from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver @@ -122,8 +129,10 @@ def _get_pool(cls) -> AsyncConnectionPool: RuntimeError: If CheckpointerManager is not initialized. """ if not cls._pool: - raise RuntimeError( - "CheckpointerManager not initialized. Call CheckpointerManager.initialize() at application startup." + raise InternalServiceError( + "Checkpointer manager is not initialized", + code="CHECKPOINTER_MANAGER_UNINITIALIZED", + data={"operation": "get_pool"}, ) return cls._pool @@ -193,7 +202,11 @@ async def delete_thread_checkpoints(thread_id: str) -> None: """ checkpointer = get_checkpointer() if checkpointer is None: - raise RuntimeError("Checkpoint is not enabled. 
Enable checkpoint in settings to use this function.") + raise ServiceUnavailableError( + "Checkpoint is not enabled", + code="CHECKPOINTER_DISABLED", + data=None, + ) try: await checkpointer.adelete_thread(thread_id) diff --git a/backend/app/core/agent/cli_backends/__init__.py b/backend/app/core/agent/cli_backends/__init__.py new file mode 100644 index 000000000..e648e1f7e --- /dev/null +++ b/backend/app/core/agent/cli_backends/__init__.py @@ -0,0 +1,13 @@ +from .base import CLIMessage, CLIResult, RuntimeProvider, RuntimeSession +from .registry import RuntimeProviderRegistry +from .session_registry import SessionRegistry, session_registry + +__all__ = [ + "CLIMessage", + "CLIResult", + "RuntimeProvider", + "RuntimeSession", + "RuntimeProviderRegistry", + "SessionRegistry", + "session_registry", +] diff --git a/backend/app/core/agent/cli_backends/base.py b/backend/app/core/agent/cli_backends/base.py new file mode 100644 index 000000000..2cdce9409 --- /dev/null +++ b/backend/app/core/agent/cli_backends/base.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import asyncio +import json +from dataclasses import dataclass +from typing import Any, AsyncIterator, Awaitable, Callable, Protocol + + +@dataclass +class CLIMessage: + type: str # "text" | "thinking" | "tool_use" | "tool_result" | "error" | "artifact" | "approval_request" + content: str = "" + tool: str = "" + call_id: str = "" + input: dict | None = None + output: str = "" + error_payload: dict[str, Any] | None = None + # Observation/instrumentation fields — used by CLIObservationExtractor + tool_name: str | None = None + tool_input: dict | None = None + usage: dict | None = None + + +def build_control_response(request_id: str, behavior: str) -> str: + return json.dumps( + { + "type": "control_response", + "response": { + "subtype": "success", + "request_id": request_id, + "response": {"behavior": behavior}, + }, + } + ) + + +@dataclass +class CLIResult: + status: str # "completed" | "failed" | "timeout" | "blocked" + output: str = "" + error: str = "" + error_payload: dict[str, Any] | None = None + session_id: str = "" + branch_name: str = "" + usage: dict | None = None + + +@dataclass +class RuntimeSession: + messages: asyncio.Queue[CLIMessage | None] + result: asyncio.Future[CLIResult] + _inject_fn: Callable[[str], Awaitable[None]] | None = None + _cancel_fn: Callable[[], Awaitable[None]] | None = None + _drain_task: asyncio.Task | None = None + + async def inject_message(self, message: str) -> None: + if self._inject_fn: + await self._inject_fn(message) + + async def cancel(self) -> None: + if self._cancel_fn: + await self._cancel_fn() + if self._drain_task: + self._drain_task.cancel() + + async def iter_messages(self) -> AsyncIterator[CLIMessage]: + while True: + msg = await self.messages.get() + if msg is None: + break + yield msg + + +class RuntimeProvider(Protocol): + provider_type: str + + async def execute( + self, + prompt: str, + *, + container_id: str, + cwd: str | None = None, + model: str | None = None, + timeout: int = 7200, + resume_session_id: str | None = None, + env: dict[str, str] | None = None, + auto_approve: bool = True, + ) -> RuntimeSession: ... 
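+
+
+# Usage sketch (illustrative only; nothing below is wired into the codebase).
+# It shows how a caller is expected to drive any RuntimeProvider: start an
+# execution, stream CLIMessages, then await the final CLIResult. `provider`
+# and `container_id` are assumed to come from the registry / container service.
+#
+#     async def run_once(provider: RuntimeProvider, container_id: str) -> CLIResult:
+#         session = await provider.execute("echo hello", container_id=container_id)
+#         async for msg in session.iter_messages():
+#             print(msg.type, msg.content)
+#         return await session.result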
diff --git a/backend/app/core/agent/cli_backends/claude_code.py b/backend/app/core/agent/cli_backends/claude_code.py new file mode 100644 index 000000000..564ecc774 --- /dev/null +++ b/backend/app/core/agent/cli_backends/claude_code.py @@ -0,0 +1,294 @@ +from __future__ import annotations + +import asyncio +import json +from typing import Any + +from loguru import logger + +from .base import CLIMessage, CLIResult, RuntimeSession +from .container_bridge import ContainerProcessBridge + + +class ClaudeCodeProvider: + provider_type = "claude_code" + + def __init__(self, executable_path: str = "claude"): + self.executable_path = executable_path + self.bridge = ContainerProcessBridge() + + async def execute( + self, + prompt: str, + *, + container_id: str, + cwd: str | None = None, + model: str | None = None, + timeout: int = 7200, + resume_session_id: str | None = None, + env: dict[str, str] | None = None, + auto_approve: bool = True, + ) -> RuntimeSession: + cmd = [ + self.executable_path, + "-p", + "--output-format", + "stream-json", + "--input-format", + "stream-json", + "--verbose", + "--max-turns", + "200", + "--permission-mode", + "bypassPermissions" if auto_approve else "default", + ] + if model: + cmd.extend(["--model", model]) + if resume_session_id: + cmd.extend(["--resume", resume_session_id]) + + process = await self.bridge.exec_streaming( + container_id, + cmd, + env=env, + workdir=cwd, + ) + logger.info(f"[claude] docker exec started for container {container_id[:12]}, pid={process.pid}") + + # Send the initial prompt via stdin as stream-json (not --print) + if not resume_session_id and process.stdin: + user_msg = json.dumps( + { + "type": "user", + "message": { + "role": "user", + "content": [{"type": "text", "text": prompt}], + }, + } + ) + process.stdin.write(f"{user_msg}\n".encode()) + await process.stdin.drain() + logger.info(f"[claude] Sent initial prompt via stdin ({len(prompt)} chars)") + + queue: asyncio.Queue[CLIMessage | None] = asyncio.Queue(maxsize=512) + loop = asyncio.get_event_loop() + result_future: asyncio.Future[CLIResult] = loop.create_future() + + drain_task = asyncio.create_task( + self._drain(process, queue, result_future, timeout), + name=f"claude-drain-{container_id[:12]}", + ) + + async def inject(message: str) -> None: + if process.stdin and not process.stdin.is_closing(): + process.stdin.write(f"{message}\n".encode()) + await process.stdin.drain() + + async def cancel() -> None: + process.terminate() + + return RuntimeSession( + messages=queue, + result=result_future, + _inject_fn=inject, + _cancel_fn=cancel, + _drain_task=drain_task, + ) + + async def _drain( + self, + process: asyncio.subprocess.Process, + queue: asyncio.Queue[CLIMessage | None], + result_future: asyncio.Future[CLIResult], + timeout: int, + ) -> None: + accumulated_text: list[str] = [] + session_id = "" + usage: dict = {} + is_error = False + + try: + async with asyncio.timeout(timeout): + assert process.stdout is not None + logger.info("[claude] Drain loop started, reading stdout...") + async for raw_line in process.stdout: + line = raw_line.decode().strip() + if not line: + continue + try: + event = json.loads(line) + except json.JSONDecodeError: + logger.warning(f"[claude] Non-JSON line from stdout: {line[:200]}") + continue + + if not isinstance(event, dict): + continue + + event_type = event.get("type", "unknown") + logger.info(f"[claude] Received event: type={event_type}") + + for msg in self._parse_event(event): + if msg.type == "text": + accumulated_text.append(msg.content) + 
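# Accumulated text later becomes CLIResult.output via "\n".join(...);
+                        # every message, text or not, is also forwarded to queue consumers below.
+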
await queue.put(msg) + + if event.get("type") == "result": + result_data = event.get("result", {}) + if isinstance(result_data, dict): + session_id = result_data.get("session_id", "") + if "usage" in event: + usage = event["usage"] + if event.get("is_error"): + is_error = True + # result received — close stdin so the process exits cleanly + if process.stdin and not process.stdin.is_closing(): + process.stdin.close() + break + + except TimeoutError: + if not result_future.done(): + result_future.set_result( + CLIResult( + status="timeout", + error="Agent timed out", + error_payload={ + "code": "CLAUDE_CODE_TIMEOUT", + "message": "Agent timed out", + "data": None, + "source": "runtime", + "retryable": True, + }, + ) + ) + except Exception as e: + logger.error(f"Claude drain error: {e}") + if not result_future.done(): + result_future.set_result( + CLIResult( + status="failed", + error=str(e), + error_payload={ + "code": "CLAUDE_CODE_DRAIN_FAILED", + "message": str(e), + "data": None, + "source": "runtime", + "retryable": False, + }, + ) + ) + finally: + if not result_future.done(): + exit_code = await process.wait() + if is_error: + result_future.set_result( + CLIResult( + status="failed", + output="\n".join(accumulated_text), + error="\n".join(accumulated_text) or "Claude Code reported an error", + error_payload={ + "code": "CLAUDE_CODE_EXECUTION_FAILED", + "message": "\n".join(accumulated_text) or "Claude Code reported an error", + "data": None, + "source": "runtime", + "retryable": False, + }, + session_id=session_id, + usage=usage, + ) + ) + elif exit_code == 0 or accumulated_text: + result_future.set_result( + CLIResult( + status="completed", + output="\n".join(accumulated_text), + session_id=session_id, + usage=usage, + ) + ) + else: + stderr_bytes = await process.stderr.read() if process.stderr else b"" + result_future.set_result( + CLIResult( + status="failed", + error=f"Exit code {exit_code}: {stderr_bytes.decode()[:2000]}", + error_payload={ + "code": "CLAUDE_CODE_EXIT_FAILED", + "message": f"Exit code {exit_code}: {stderr_bytes.decode()[:2000]}", + "data": {"exit_code": exit_code}, + "source": "runtime", + "retryable": False, + }, + usage=usage, + ) + ) + await queue.put(None) + + def _parse_event(self, event: dict) -> list[CLIMessage]: + messages: list[CLIMessage] = [] + event_type = event.get("type", "") + + if event_type == "assistant" and "message" in event: + msg = event["message"] + for block in msg.get("content", []) if isinstance(msg, dict) else []: + if isinstance(block, str): + messages.append(CLIMessage(type="text", content=block)) + continue + if not isinstance(block, dict): + continue + block_type = block.get("type", "") + if block_type == "text": + messages.append(CLIMessage(type="text", content=block.get("text", ""))) + elif block_type == "tool_use": + messages.append( + CLIMessage( + type="tool_use", + tool=block.get("name", ""), + call_id=block.get("id", ""), + input=block.get("input"), + ) + ) + elif block_type == "thinking": + messages.append(CLIMessage(type="thinking", content=block.get("thinking", ""))) + + elif event_type == "tool_result": + messages.append( + CLIMessage( + type="tool_result", + tool=event.get("tool", ""), + call_id=event.get("call_id", ""), + output=str(event.get("output", ""))[:8192], + ) + ) + + elif event_type == "control_request": + request = event.get("request", {}) + messages.append( + CLIMessage( + type="approval_request", + tool=request.get("tool_name", ""), + call_id=event.get("request_id", ""), + input=request.get("input"), + 
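# content carries the control_request subtype; a host can answer by
+                        # injecting build_control_response(request_id, behavior) (see base.py).
+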
content=request.get("subtype", ""), + ) + ) + elif event_type == "system" and event.get("subtype") == "error": + error_payload = _extract_claude_error_payload(event) + messages.append( + CLIMessage( + type="error", + content=error_payload["message"], + error_payload=error_payload, + ) + ) + + return messages + + +def _extract_claude_error_payload(event: dict[str, Any]) -> dict[str, Any]: + return { + "code": str(event.get("code") or "CLAUDE_CODE_RUNTIME_ERROR"), + "message": str(event.get("message") or "Claude Code runtime error"), + "data": event.get("data") if isinstance(event.get("data"), dict) else None, + "source": "runtime", + "retryable": False, + } diff --git a/backend/app/core/agent/cli_backends/codex.py b/backend/app/core/agent/cli_backends/codex.py new file mode 100644 index 000000000..bcab5d92c --- /dev/null +++ b/backend/app/core/agent/cli_backends/codex.py @@ -0,0 +1,526 @@ +from __future__ import annotations + +import asyncio +import json +from typing import Any + +from loguru import logger + +from app.common.app_errors import InternalServiceError +from app.utils.safe_task import safe_create_task + +from .base import CLIMessage, CLIResult, RuntimeSession +from .container_bridge import ContainerProcessBridge + + +class CodexProvider: + """Runtime provider for OpenAI Codex CLI. + + Codex uses JSON-RPC 2.0 over stdio. We spawn ``codex app-server --listen + stdio://``, perform the initialize / thread/start / turn/start handshake, + then read NDJSON lines from stdout. Each line is either: + + * A *response* (has ``id`` + ``result`` or ``error``) — matched to a + pending request. + * A *server request* (has ``id`` + ``method``) — auto-approved. + * A *notification* (has ``method``, no ``id``) — mapped to CLIMessage. + """ + + provider_type = "codex" + + def __init__(self, executable_path: str = "codex") -> None: + self.executable_path = executable_path + self.bridge = ContainerProcessBridge() + + # ── public API ────────────────────────────────────────────────────── + + async def execute( + self, + prompt: str, + *, + container_id: str, + cwd: str | None = None, + model: str | None = None, + timeout: int = 7200, + resume_session_id: str | None = None, + env: dict[str, str] | None = None, + auto_approve: bool = True, + ) -> RuntimeSession: + cmd = [self.executable_path, "app-server", "--listen", "stdio://"] + + process = await self.bridge.exec_streaming( + container_id, + cmd, + env=env, + workdir=cwd, + ) + + queue: asyncio.Queue[CLIMessage | None] = asyncio.Queue(maxsize=512) + loop = asyncio.get_event_loop() + result_future: asyncio.Future[CLIResult] = loop.create_future() + + drain_task = asyncio.create_task( + self._drain(process, queue, result_future, prompt, model, timeout), + name=f"codex-drain-{container_id[:12]}", + ) + + async def inject(message: str) -> None: + if process.stdin and not process.stdin.is_closing(): + process.stdin.write(f"{message}\n".encode()) + await process.stdin.drain() + + async def cancel() -> None: + process.terminate() + + return RuntimeSession( + messages=queue, + result=result_future, + _inject_fn=inject, + _cancel_fn=cancel, + _drain_task=drain_task, + ) + + # ── JSON-RPC helpers ──────────────────────────────────────────────── + + _next_id: int = 0 + + async def _rpc_request( + self, + process: asyncio.subprocess.Process, + method: str, + params: dict, + ) -> dict: + """Send a JSON-RPC request and wait for the matching response.""" + self._next_id += 1 + req_id = self._next_id + msg = {"jsonrpc": "2.0", "id": req_id, "method": method, 
"params": params} + line = json.dumps(msg) + "\n" + assert process.stdin is not None + process.stdin.write(line.encode()) + await process.stdin.drain() + return {"_pending_id": req_id} + + async def _rpc_notify( + self, + process: asyncio.subprocess.Process, + method: str, + ) -> None: + msg = {"jsonrpc": "2.0", "method": method} + line = json.dumps(msg) + "\n" + assert process.stdin is not None + process.stdin.write(line.encode()) + await process.stdin.drain() + + async def _rpc_respond( + self, + process: asyncio.subprocess.Process, + req_id: int, + result: dict, + ) -> None: + msg = {"jsonrpc": "2.0", "id": req_id, "result": result} + line = json.dumps(msg) + "\n" + assert process.stdin is not None + process.stdin.write(line.encode()) + await process.stdin.drain() + + # ── drain loop ────────────────────────────────────────────────────── + + async def _drain( + self, + process: asyncio.subprocess.Process, + queue: asyncio.Queue[CLIMessage | None], + result_future: asyncio.Future[CLIResult], + prompt: str, + model: str | None, + timeout: int, + ) -> None: + accumulated_text: list[str] = [] + pending: dict[int, asyncio.Future[dict]] = {} + turn_done: asyncio.Future[bool] = asyncio.get_event_loop().create_future() + + async def wait_response(req_id: int) -> dict: + """Register a pending request and wait for its response.""" + fut: asyncio.Future[dict] = asyncio.get_event_loop().create_future() + pending[req_id] = fut + return await fut + + def handle_line(line: str) -> None: + """Route a single JSON-RPC line.""" + try: + raw = json.loads(line) + except json.JSONDecodeError: + return + if not isinstance(raw, dict): + return + + has_id = "id" in raw + + # Response to our request + if has_id and ("result" in raw or "error" in raw): + req_id = raw.get("id") + if req_id in pending and not pending[req_id].done(): + if "error" in raw: + pending[req_id].set_exception(RuntimeError(f"RPC error: {raw['error']}")) + else: + pending[req_id].set_result(raw.get("result", {})) + return + + # Server request (has id + method) — auto-approve + if has_id and "method" in raw: + method = raw.get("method", "") + safe_create_task( + self._rpc_respond(process, raw["id"], {"decision": "accept"}), + name=f"codex-rpc-{raw['id']}", + ) + return + + # Notification (no id, has method) + if "method" in raw: + for msg in self._parse_notification(raw): + if msg.type == "text": + accumulated_text.append(msg.content) + try: + queue.put_nowait(msg) + except asyncio.QueueFull: + logger.warning(f"Codex event queue full, dropping message: {msg.type}") + + # Detect turn completion + method = raw.get("method", "") + params = raw.get("params", {}) + if method == "turn/completed": + status = _nested_str(params, "turn", "status") + aborted = status in ("cancelled", "canceled", "aborted", "interrupted") + if not turn_done.done(): + turn_done.set_result(aborted) + elif method == "codex/event": + msg_data = (params or {}).get("msg", {}) + if isinstance(msg_data, dict): + msg_type = msg_data.get("type", "") + if msg_type == "task_complete": + if not turn_done.done(): + turn_done.set_result(False) + elif msg_type == "turn_aborted": + if not turn_done.done(): + turn_done.set_result(True) + + try: + async with asyncio.timeout(timeout): + # 1. 
Send initialize + await self._rpc_request( + process, + "initialize", + { + "clientInfo": { + "name": "joysafeter-agent", + "title": "JoySafeter Agent", + "version": "0.1.0", + }, + "capabilities": {"experimentalApi": True}, + }, + ) + init_id = self._next_id + + # Read lines until we get the initialize response + assert process.stdout is not None + async for raw_line in process.stdout: + line = raw_line.decode().strip() + if not line: + continue + try: + raw = json.loads(line) + except json.JSONDecodeError: + continue + if isinstance(raw, dict) and raw.get("id") == init_id and "result" in raw: + break + handle_line(line) + + # 2. Send initialized notification + await self._rpc_notify(process, "initialized") + + # 3. Start thread + await self._rpc_request( + process, + "thread/start", + { + "model": model, + "cwd": None, + "approvalPolicy": None, + "sandbox": None, + }, + ) + thread_start_id = self._next_id + thread_id = "" + + assert process.stdout is not None + async for raw_line in process.stdout: + line = raw_line.decode().strip() + if not line: + continue + try: + raw = json.loads(line) + except json.JSONDecodeError: + continue + if isinstance(raw, dict) and raw.get("id") == thread_start_id and "result" in raw: + result_data = raw.get("result", {}) + thread_data = result_data.get("thread", {}) if isinstance(result_data, dict) else {} + thread_id = thread_data.get("id", "") if isinstance(thread_data, dict) else "" + break + handle_line(line) + + if not thread_id: + raise InternalServiceError( + "Codex thread start returned no thread ID", + code="CODEX_THREAD_START_INVALID", + data=None, + ) + + logger.info(f"codex thread started: {thread_id}") + + # 4. Start turn + await self._rpc_request( + process, + "turn/start", + { + "threadId": thread_id, + "input": [{"type": "text", "text": prompt}], + }, + ) + + # 5. 
Read events until turn completes + assert process.stdout is not None + async for raw_line in process.stdout: + line = raw_line.decode().strip() + if not line: + continue + handle_line(line) + if turn_done.done(): + break + + except TimeoutError: + if not result_future.done(): + result_future.set_result( + CLIResult( + status="timeout", + error="Codex agent timed out", + error_payload={ + "code": "CODEX_TIMEOUT", + "message": "Codex agent timed out", + "data": None, + "source": "runtime", + "retryable": True, + }, + ) + ) + except Exception as e: + logger.error(f"Codex drain error: {e}") + if not result_future.done(): + result_future.set_result( + CLIResult( + status="failed", + error=str(e), + error_payload={ + "code": "CODEX_DRAIN_FAILED", + "message": str(e), + "data": None, + "source": "runtime", + "retryable": False, + }, + ) + ) + finally: + if not result_future.done(): + exit_code = await process.wait() + aborted = turn_done.done() and turn_done.result() + if aborted: + result_future.set_result( + CLIResult( + status="failed", + output="\n".join(accumulated_text), + error="Turn was aborted", + error_payload={ + "code": "CODEX_TURN_ABORTED", + "message": "Turn was aborted", + "data": None, + "source": "runtime", + "retryable": False, + }, + ) + ) + elif exit_code == 0 or accumulated_text: + result_future.set_result( + CLIResult( + status="completed", + output="\n".join(accumulated_text), + ) + ) + else: + stderr_bytes = await process.stderr.read() if process.stderr else b"" + result_future.set_result( + CLIResult( + status="failed", + error=f"Exit code {exit_code}: {stderr_bytes.decode()[:2000]}", + error_payload={ + "code": "CODEX_EXIT_FAILED", + "message": f"Exit code {exit_code}: {stderr_bytes.decode()[:2000]}", + "data": {"exit_code": exit_code}, + "source": "runtime", + "retryable": False, + }, + ) + ) + await queue.put(None) + + # ── event parsing (testable without Docker) ───────────────────────── + + def _parse_notification(self, raw: dict) -> list[CLIMessage]: + """Parse a JSON-RPC notification into CLIMessage list. + + Handles both legacy ``codex/event`` notifications and raw v2 + ``item/*`` / ``turn/*`` notifications. 
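+
+        Illustrative legacy event (mapped to a single text CLIMessage)::
+
+            {"jsonrpc": "2.0", "method": "codex/event",
+             "params": {"msg": {"type": "agent_message", "message": "done"}}}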
+ """ + method = raw.get("method", "") + params = raw.get("params", {}) or {} + + # Legacy codex/event format + if method == "codex/event" or method.startswith("codex/event/"): + return self._parse_legacy_event(params) + + # Raw v2 item notifications + if method.startswith("item/"): + return self._parse_item_notification(method, params) + + if method == "turn/error": + payload = _extract_codex_error_payload(params) + return [CLIMessage(type="error", content=payload["message"], error_payload=payload)] + + return [] + + def _parse_legacy_event(self, params: dict) -> list[CLIMessage]: + msg_data = params.get("msg") + if not isinstance(msg_data, dict): + return [] + + msg_type = msg_data.get("type", "") + messages: list[CLIMessage] = [] + + if msg_type == "agent_message": + text = msg_data.get("message", "") + if text: + messages.append(CLIMessage(type="text", content=text)) + elif msg_type == "exec_command_begin": + messages.append( + CLIMessage( + type="tool_use", + tool="exec_command", + call_id=msg_data.get("call_id", ""), + input={"command": msg_data.get("command", "")}, + ) + ) + elif msg_type == "exec_command_end": + messages.append( + CLIMessage( + type="tool_result", + tool="exec_command", + call_id=msg_data.get("call_id", ""), + output=msg_data.get("output", ""), + ) + ) + elif msg_type == "patch_apply_begin": + messages.append( + CLIMessage( + type="tool_use", + tool="patch_apply", + call_id=msg_data.get("call_id", ""), + ) + ) + elif msg_type == "patch_apply_end": + messages.append( + CLIMessage( + type="tool_result", + tool="patch_apply", + call_id=msg_data.get("call_id", ""), + ) + ) + + return messages + + def _parse_item_notification(self, method: str, params: dict) -> list[CLIMessage]: + item = params.get("item", {}) + if not isinstance(item, dict): + return [] + + item_type = item.get("type", "") + item_id = item.get("id", "") + + if method == "item/started" and item_type == "commandExecution": + return [ + CLIMessage( + type="tool_use", + tool="exec_command", + call_id=item_id, + input={"command": item.get("command", "")}, + ) + ] + + if method == "item/completed" and item_type == "commandExecution": + return [ + CLIMessage( + type="tool_result", + tool="exec_command", + call_id=item_id, + output=item.get("aggregatedOutput", ""), + ) + ] + + if method == "item/started" and item_type == "fileChange": + return [ + CLIMessage( + type="tool_use", + tool="patch_apply", + call_id=item_id, + ) + ] + + if method == "item/completed" and item_type == "fileChange": + return [ + CLIMessage( + type="tool_result", + tool="patch_apply", + call_id=item_id, + ) + ] + + if method == "item/completed" and item_type == "agentMessage": + text = item.get("text", "") + if text: + return [CLIMessage(type="text", content=text)] + + return [] + + +def _nested_str(m: dict, *keys: str) -> str: + current = m + for key in keys: + if not isinstance(current, dict): + return "" + current = current.get(key, {}) + return current if isinstance(current, str) else "" + + +def _extract_codex_error_payload(params: dict[str, Any]) -> dict[str, Any]: + error = params.get("error") + if isinstance(error, dict): + return { + "code": str(error.get("code") or "CODEX_RUNTIME_ERROR"), + "message": str(error.get("message") or "Codex runtime error"), + "data": error.get("data") if isinstance(error.get("data"), dict) else None, + "source": "runtime", + "retryable": False, + } + return { + "code": "CODEX_RUNTIME_ERROR", + "message": str(params.get("message") or "Codex runtime error"), + "data": None, + "source": "runtime", + 
"retryable": False, + } diff --git a/backend/app/core/agent/cli_backends/container_bridge.py b/backend/app/core/agent/cli_backends/container_bridge.py new file mode 100644 index 000000000..6b00b0166 --- /dev/null +++ b/backend/app/core/agent/cli_backends/container_bridge.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +import asyncio + +from loguru import logger + + +class ContainerProcessBridge: + async def exec_streaming( + self, + container_id: str, + cmd: list[str], + workdir: str | None = None, + env: dict[str, str] | None = None, + ) -> asyncio.subprocess.Process: + docker_cmd = ["docker", "exec", "-i"] + if workdir: + docker_cmd.extend(["-w", workdir]) + if env: + for key, value in env.items(): + docker_cmd.extend(["-e", f"{key}={value}"]) + docker_cmd.append(container_id) + docker_cmd.extend(cmd) + + logger.debug(f"container exec: {' '.join(docker_cmd[:6])}...") + + return await asyncio.create_subprocess_exec( + *docker_cmd, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) diff --git a/backend/app/core/agent/cli_backends/container_pool.py b/backend/app/core/agent/cli_backends/container_pool.py new file mode 100644 index 000000000..dd6da4c3a --- /dev/null +++ b/backend/app/core/agent/cli_backends/container_pool.py @@ -0,0 +1,193 @@ +""" +CLI agent container pool with TTL-based eviction and session resume. + +Keeps containers alive after execution for reuse by the same agent. +Idle containers are cleaned up after `idle_timeout` seconds (default 30 min). +On app shutdown, containers are NOT destroyed — they survive restarts. +""" + +from __future__ import annotations + +import asyncio +import time +import uuid +from dataclasses import dataclass +from typing import Optional + +from loguru import logger + +from .container_service import CLIContainerService, ContainerInfo + + +@dataclass +class PoolEntry: + container: ContainerInfo + agent_profile_id: uuid.UUID + last_used: float + created_at: float + active_count: int = 0 + last_session_id: Optional[str] = None + + +class ContainerPool: + """Pool of CLI agent containers keyed by agent_profile_id.""" + + def __init__( + self, + container_service: Optional[CLIContainerService] = None, + idle_timeout: int = 1800, + max_size: int = 20, + ): + self._pool: dict[uuid.UUID, PoolEntry] = {} + self._lock = asyncio.Lock() + self._idle_timeout = idle_timeout + self._max_size = max_size + self._shutdown = False + self.container_service = container_service or CLIContainerService() + + async def get(self, agent_profile_id: uuid.UUID) -> tuple[Optional[ContainerInfo], Optional[str]]: + """Return (container, last_session_id) if a pooled container exists.""" + async with self._lock: + if self._shutdown: + return None, None + entry = self._pool.get(agent_profile_id) + if entry: + entry.last_used = time.time() + entry.active_count += 1 + return entry.container, entry.last_session_id + return None, None + + async def put( + self, + agent_profile_id: uuid.UUID, + container: ContainerInfo, + ) -> None: + """Register a newly created container in the pool.""" + old: Optional[PoolEntry] = None + evicted: Optional[PoolEntry] = None + shutdown = False + + async with self._lock: + if self._shutdown: + shutdown = True + else: + if len(self._pool) >= self._max_size and agent_profile_id not in self._pool: + evicted = self._evict_lru_entry() + + old = self._pool.pop(agent_profile_id, None) + self._pool[agent_profile_id] = PoolEntry( + container=container, + agent_profile_id=agent_profile_id, + 
last_used=time.time(), + created_at=time.time(), + active_count=1, + ) + logger.info( + f"Pooled container {container.container_id[:12]} for agent " + f"{agent_profile_id} (pool_size={len(self._pool)})" + ) + + if shutdown: + await self._safe_remove(container.container_id) + return + if evicted: + await self._safe_remove(evicted.container.container_id) + if old: + await self._safe_remove(old.container.container_id) + + async def release(self, agent_profile_id: uuid.UUID) -> None: + """Decrement active count after execution finishes.""" + async with self._lock: + entry = self._pool.get(agent_profile_id) + if entry: + entry.active_count = max(0, entry.active_count - 1) + entry.last_used = time.time() + + async def release_and_destroy_if_idle(self, agent_profile_id: uuid.UUID) -> bool: + """Decrement active count; if no other executions are using it, remove the container. + + Returns True if the container was destroyed.""" + entry: Optional[PoolEntry] = None + async with self._lock: + e = self._pool.get(agent_profile_id) + if not e: + return False + e.active_count = max(0, e.active_count - 1) + if e.active_count == 0: + entry = self._pool.pop(agent_profile_id) + if entry: + await self._safe_remove(entry.container.container_id) + logger.info( + f"Destroyed idle container {entry.container.container_id[:12]} " + f"for agent {agent_profile_id} after cancel" + ) + return True + return False + + async def set_session_id(self, agent_profile_id: uuid.UUID, session_id: str) -> None: + """Store Claude session_id for next --resume.""" + async with self._lock: + entry = self._pool.get(agent_profile_id) + if entry and session_id: + entry.last_session_id = session_id + + async def remove(self, agent_profile_id: uuid.UUID) -> None: + """Force-remove a container (e.g., on execution failure).""" + entry: Optional[PoolEntry] = None + async with self._lock: + entry = self._pool.pop(agent_profile_id, None) + if entry: + await self._safe_remove(entry.container.container_id) + + async def cleanup_idle(self) -> int: + """Remove containers idle longer than idle_timeout. Returns count removed.""" + now = time.time() + to_remove: list[tuple[uuid.UUID, str]] = [] + + async with self._lock: + for agent_id, entry in list(self._pool.items()): + idle = entry.active_count == 0 and (now - entry.last_used) > self._idle_timeout + if idle: + self._pool.pop(agent_id) + to_remove.append((agent_id, entry.container.container_id)) + + for agent_id, container_id in to_remove: + await self._safe_remove(container_id) + logger.info(f"Evicted idle container {container_id[:12]} for agent {agent_id}") + + return len(to_remove) + + async def shutdown(self) -> None: + """Mark pool as shut down. Containers are NOT destroyed.""" + async with self._lock: + self._shutdown = True + self._pool.clear() + logger.info("Container pool shut down (containers left running for reuse after restart)") + + async def _evict_lru(self) -> None: + """Evict the least-recently-used idle container (takes the pool lock for the pop; docker removal runs outside it).""" + async with self._lock: + entry = self._evict_lru_entry() + if entry: + await self._safe_remove(entry.container.container_id) + + def _evict_lru_entry(self) -> Optional[PoolEntry]: + """Pop the LRU idle entry from the pool. 
Must be called inside lock.""" + lru_id: Optional[uuid.UUID] = None + lru_time = float("inf") + for agent_id, entry in self._pool.items(): + if entry.active_count == 0 and entry.last_used < lru_time: + lru_time = entry.last_used + lru_id = agent_id + if lru_id: + return self._pool.pop(lru_id) + return None + + async def _safe_remove(self, container_id: str) -> None: + try: + await self.container_service.remove_container(container_id, force=True) + except Exception as exc: + logger.warning(f"Failed to remove pooled container {container_id[:12]}: {exc}") + + +# Module-level singleton +container_pool = ContainerPool() diff --git a/backend/app/core/agent/cli_backends/container_service.py b/backend/app/core/agent/cli_backends/container_service.py new file mode 100644 index 000000000..f767bffde --- /dev/null +++ b/backend/app/core/agent/cli_backends/container_service.py @@ -0,0 +1,156 @@ +""" +CLI container lifecycle management via Docker. +""" + +from __future__ import annotations + +import asyncio +import os +import tempfile +import uuid +from dataclasses import dataclass, field +from typing import Optional + +from loguru import logger + +from app.common.app_errors import ServiceUnavailableError + + +@dataclass +class ContainerConfig: + image: str = "joysafeter/cli-agent:latest" + memory_limit: str = "2g" + cpu_quota: int = 200000 + network_mode: str = "bridge" + working_dir: str = "/workspace" + labels: dict[str, str] = field(default_factory=dict) + + +@dataclass +class ContainerInfo: + container_id: str + name: str + status: str + working_dir: str + + +class CLIContainerService: + """Manages Docker container lifecycle for CLI agent executions.""" + + def __init__(self, default_config: Optional[ContainerConfig] = None): + self.default_config = default_config or ContainerConfig() + + async def create_container( + self, + *, + execution_id: uuid.UUID, + config: Optional[ContainerConfig] = None, + env: Optional[dict[str, str]] = None, + ) -> ContainerInfo: + cfg = config or self.default_config + name = f"cli-agent-{execution_id!s:.12}" + + docker_cmd = [ + "docker", + "create", + "--name", + name, + "-w", + cfg.working_dir, + f"--memory={cfg.memory_limit}", + f"--cpu-quota={cfg.cpu_quota}", + f"--network={cfg.network_mode}", + ] + for k, v in cfg.labels.items(): + docker_cmd.extend(["--label", f"{k}={v}"]) + docker_cmd.extend(["--label", f"execution_id={execution_id}"]) + + env_file_path: Optional[str] = None + if env: + env_file_path = self._write_env_file(env) + docker_cmd.extend(["--env-file", env_file_path]) + + docker_cmd.append(cfg.image) + docker_cmd.append("sleep") + docker_cmd.append("infinity") + + try: + container_id = await self._run_docker(docker_cmd) + container_id = container_id.strip() + finally: + if env_file_path: + try: + os.unlink(env_file_path) + except OSError: + pass + + await self._run_docker(["docker", "start", container_id]) + + logger.info(f"Created container {container_id[:12]} for execution {execution_id}") + return ContainerInfo( + container_id=container_id, + name=name, + status="running", + working_dir=cfg.working_dir, + ) + + @staticmethod + def _write_env_file(env: dict[str, str]) -> str: + """Write env vars to a temp file (mode 0600) and return its path.""" + fd, path = tempfile.mkstemp(prefix="cli_agent_env_", suffix=".env") + try: + with os.fdopen(fd, "w") as f: + for k, v in env.items(): + f.write(f"{k}={v}\n") + os.chmod(path, 0o600) + except Exception: + os.unlink(path) + raise + return path + + async def stop_container(self, container_id: str, timeout: 
int = 10) -> None: + try: + await self._run_docker(["docker", "stop", "-t", str(timeout), container_id]) + logger.info(f"Stopped container {container_id[:12]}") + except RuntimeError as exc: + logger.warning(f"Failed to stop container {container_id[:12]}: {exc}") + + async def remove_container(self, container_id: str, force: bool = True) -> None: + cmd = ["docker", "rm"] + if force: + cmd.append("-f") + cmd.append(container_id) + try: + await self._run_docker(cmd) + logger.info(f"Removed container {container_id[:12]}") + except RuntimeError as exc: + logger.warning(f"Failed to remove container {container_id[:12]}: {exc}") + + async def copy_to_container(self, container_id: str, src_path: str, dest_path: str) -> None: + await self._run_docker(["docker", "cp", src_path, f"{container_id}:{dest_path}"]) + + async def exec_in_container(self, container_id: str, cmd: list[str], workdir: Optional[str] = None) -> str: + docker_cmd = ["docker", "exec"] + if workdir: + docker_cmd.extend(["-w", workdir]) + docker_cmd.append(container_id) + docker_cmd.extend(cmd) + return await self._run_docker(docker_cmd) + + async def inspect_container(self, container_id: str) -> str: + return await self._run_docker(["docker", "inspect", "--format", "{{.State.Status}}", container_id]) + + async def _run_docker(self, cmd: list[str]) -> str: + proc = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + raise ServiceUnavailableError( + "Docker command failed", + code="DOCKER_COMMAND_FAILED", + data={"command": cmd, "exit_code": proc.returncode, "detail": stderr.decode()[:1000]}, + ) + return stdout.decode() diff --git a/backend/app/core/agent/cli_backends/execution_runner.py b/backend/app/core/agent/cli_backends/execution_runner.py new file mode 100644 index 000000000..6dfc981a6 --- /dev/null +++ b/backend/app/core/agent/cli_backends/execution_runner.py @@ -0,0 +1,483 @@ +""" +ExecutionRunner — end-to-end orchestrator for CLI agent executions. + +Lifecycle: + 1. Get or create container (from pool) + 2. Inject credentials, skills, and CLAUDE.md config + 3. Execute via RuntimeProvider (with session resume if available) + 4. Drain messages → append as ExecutionEvents + 5. Mark final status, store session_id back to pool + 6. 
diff --git a/backend/app/core/agent/cli_backends/execution_runner.py b/backend/app/core/agent/cli_backends/execution_runner.py
new file mode 100644
index 000000000..6dfc981a6
--- /dev/null
+++ b/backend/app/core/agent/cli_backends/execution_runner.py
@@ -0,0 +1,483 @@
+"""
+ExecutionRunner — end-to-end orchestrator for CLI agent executions.
+
+Lifecycle:
+    1. Get or create container (from pool)
+    2. Inject credentials, skills, and CLAUDE.md config
+    3. Execute via RuntimeProvider (with session resume if available)
+    4. Drain messages → append as ExecutionEvents
+    5. Mark final status, store session_id back to pool
+    6. Release container back to pool (NOT destroyed)
+"""
+
+from __future__ import annotations
+
+import asyncio
+import uuid
+from typing import TYPE_CHECKING, Any, Optional
+
+from loguru import logger
+
+from app.common.app_errors import normalize_app_error
+from app.core.agent.cli_backends.base import CLIMessage, CLIResult, RuntimeSession, build_control_response
+from app.core.agent.cli_backends.container_pool import container_pool
+from app.core.agent.cli_backends.container_service import (
+    CLIContainerService,
+    ContainerConfig,
+    ContainerInfo,
+)
+from app.core.agent.cli_backends.injectors import (
+    CLISkillInjector,
+    RuntimeConfigInjector,
+)
+from app.core.agent.cli_backends.registry import runtime_registry
+from app.core.agent.cli_backends.runner_callbacks import RunnerCallbacks
+from app.core.agent.cli_backends.session_registry import session_registry
+from app.core.events.event_types import ExecutionEventType
+from app.core.ports.execution import EventContext, ExecutionEventPort, ExecutionReaderPort
+
+if TYPE_CHECKING:
+    from app.models.agent import AgentRelease
+
+
+class ExecutionRunner:
+    """Orchestrates the full lifecycle of a CLI agent execution."""
+
+    def __init__(
+        self,
+        event_port: ExecutionEventPort,
+        reader_port: ExecutionReaderPort,
+        container_service: Optional[CLIContainerService] = None,
+        callbacks: Optional[RunnerCallbacks] = None,
+    ):
+        self._events = event_port
+        self._reader = reader_port
+        self.container_service = container_service or CLIContainerService()
+        self.callbacks = callbacks
+        self._auto_approve: bool = True
+        self._session: Optional[RuntimeSession] = None
+
+    async def run(
+        self,
+        *,
+        execution_id: uuid.UUID,
+        prompt: str,
+        credentials: Optional[dict[str, str]] = None,
+        skills: Optional[list[dict[str, Any]]] = None,
+        container_config: Optional[ContainerConfig] = None,
+        model: Optional[str] = None,
+        timeout: int = 7200,
+        collector: Any = None,
+    ) -> CLIResult:
+        """Run a full execution lifecycle.
+
+        Returns the final CLIResult after the agent completes or fails.
+        """
+        container: Optional[ContainerInfo] = None
+        execution = await self._reader.get_execution(execution_id)
+        run = await self._reader.get_run_for_execution(execution_id)
+        release = await self._reader.get_release_for_run(run.id)
+        pooled = False  # whether the container came from the pool
+
+        # Inject run metadata so events route through the bus
+        self._events.set_event_context(
+            EventContext(
+                run_id=run.id,
+                workspace_id=run.workspace_id,
+                trigger_medium=run.trigger_medium,
+                run_purpose=run.run_purpose,
+                thread_id=run.thread_id,
+                task_id=run.task_id,
+            )
+        )
+
+        logger.info(
+            f"[exec:{execution_id}] Starting execution "
+            f"(release={release.id if release else 'draft'}, "
+            f"engine={execution.engine_kind})"
+        )
+
+        try:
+            # 1. Mark as dispatched
+            await self._events.mark_status(execution_id=execution_id, status="dispatched")
+
+            # 2. Get or create container
+            prior_session_id: Optional[str] = None
+            if release:
+                container, prior_session_id = await container_pool.get(release.id)
+
+            if container:
+                pooled = True
+                # Verify container is still running
+                try:
+                    status = await self.container_service.inspect_container(container.container_id)
+                    if "running" not in status.strip().lower():
+                        logger.warning(
+                            f"[exec:{execution_id}] Pooled container {container.container_id[:12]} "
+                            f"not running (status={status.strip()}), creating new one"
+                        )
+                        if release:
+                            await container_pool.remove(release.id)
+                        container = None
+                        prior_session_id = None
+                        pooled = False
+                except Exception as inspect_exc:
+                    logger.warning(f"[exec:{execution_id}] Failed to inspect pooled container: {inspect_exc}")
+                    if release:
+                        await container_pool.remove(release.id)
+                    container = None
+                    prior_session_id = None
+                    pooled = False
+
+            if not container:
+                logger.info(f"[exec:{execution_id}] Creating new container")
+                container = await self.container_service.create_container(
+                    execution_id=execution_id,
+                    config=container_config,
+                    env=credentials,
+                )
+                if release:
+                    await container_pool.put(release.id, container)
+                    pooled = True
+
+            if prior_session_id:
+                logger.info(
+                    f"[exec:{execution_id}] Reusing pooled container "
+                    f"{container.container_id[:12]} with session {prior_session_id}"
+                )
+
+            await self._events.mark_status(
+                execution_id=execution_id, status="running", container_id=container.container_id
+            )
+
+            # 3. Inject skills and config (idempotent — safe to re-run on reuse)
+            await self._inject(
+                container_id=container.container_id,
+                skills=skills,
+                release=release,
+                working_dir=container.working_dir,
+            )
+
+            # 4. Record execution_started event
+            await self._events.append_event(
+                execution_id=execution_id,
+                event_type=ExecutionEventType.EXECUTION_STARTED,
+                payload={
+                    "container_id": container.container_id,
+                    "engine_kind": execution.engine_kind,
+                    "reused": prior_session_id is not None,
+                },
+            )
+
+            # 5. Execute via provider (with session resume + credentials)
+            provider = runtime_registry.get(execution.engine_kind)
+
+            # Determine auto_approve from task settings
+            if run.task_id:
+                self._auto_approve = await self._reader.get_task_auto_approve(run.task_id)
+            else:
+                self._auto_approve = True
+
+            session = await provider.execute(
+                prompt,
+                container_id=container.container_id,
+                cwd=container.working_dir,
+                model=model,
+                timeout=timeout,
+                resume_session_id=prior_session_id,
+                env=credentials,
+                auto_approve=self._auto_approve,
+            )
+
+            # 5b. Register session so the API layer can inject messages
+            session_registry.register(execution_id, session)
+            self._session = session
+
+            # 6. Drain messages → events
+            await self._drain_to_events(execution_id, collector=collector, engine_kind=execution.engine_kind)
+
+            # 7. Await final result
+            result = await session.result
+
+            # 8. Mark final status
+            await self._finalize(execution_id, result, release)
+
+            # 9. Store session_id back to pool for next resume
+            if result.session_id and release:
+                await container_pool.set_session_id(release.id, result.session_id)
+                logger.info(f"[exec:{execution_id}] Stored session {result.session_id} for release {release.id}")
+
+            return result
+
+        except Exception as exc:
+            logger.error(f"[exec:{execution_id}] ExecutionRunner error: {exc}")
+            await self._mark_failed(execution_id, str(exc))
+            app_error = normalize_app_error(
+                exc,
+                default_code="CLI_EXECUTION_RUNNER_FAILED",
+                default_message="CLI execution runner failed",
+                default_data={"execution_id": str(execution_id)},
+            )
+            return CLIResult(status="failed", error=app_error.message, error_payload=app_error.to_payload())
+
+        finally:
+            # 10. Unregister session; release container back to pool
+            session_registry.unregister(execution_id)
+            if container and release and pooled:
+                await container_pool.release(release.id)
+                logger.info(f"[exec:{execution_id}] Released container {container.container_id[:12]} back to pool")
+            elif container:
+                await self._cleanup_container(container.container_id)
+                logger.info(f"[exec:{execution_id}] Destroyed container {container.container_id[:12]} (no release)")
+
+    async def _inject(
+        self,
+        *,
+        container_id: str,
+        skills: Optional[list[dict[str, Any]]],
+        release: Optional[AgentRelease],
+        working_dir: str,
+    ) -> None:
+        skill_injector = CLISkillInjector(self.container_service)
+        config_injector = RuntimeConfigInjector(self.container_service)
+
+        if skills:
+            await skill_injector.inject(container_id, skills)
+
+        # Pull instructions from release runtime_binding if present
+        instructions = release.runtime_binding.get("instructions") if release else None
+        skill_names = None
+        if skills:
+            skill_names = [s.get("name", "") for s in skills if s.get("name")]
+
+        await config_injector.inject(
+            container_id,
+            instructions=instructions,
+            skill_names=skill_names,
+            working_dir=working_dir,
+        )
+
+    _DRAIN_BATCH_SIZE = 5
+    _DRAIN_FLUSH_INTERVAL = 0.5  # seconds — flush at least every 500ms
+
+    async def _drain_to_events(
+        self,
+        execution_id: uuid.UUID,
+        *,
+        collector: Any = None,
+        engine_kind: str = "cli",
+    ) -> None:
+        assert self._session is not None, "_drain_to_events called before session was set"
+        pending: list[tuple[CLIMessage, str, dict[str, Any]]] = []
+        logger.info(f"[exec:{execution_id}] _drain_to_events started")
+        queue = self._session.messages
+
+        # Observation: set up root span + extractor if collector is set
+        root_span = None
+        extractor = None
+        if collector:
+            from app.core.observation.instrumentation.cli_extractor import CLIObservationExtractor
+
+            root_span = collector.start_agent(name=f"cli:{engine_kind}")
+            extractor = CLIObservationExtractor(collector, root_span)
+
+        while True:
+            try:
+                msg = await asyncio.wait_for(queue.get(), timeout=self._DRAIN_FLUSH_INTERVAL)
+            except asyncio.TimeoutError:
+                # Timeout — flush whatever we have so far
+                if pending:
+                    await self._flush_pending(execution_id, pending)
+                    pending.clear()
+                continue
+
+            if msg is None:
+                break
+
+            # Observation: process message through extractor
+            if extractor:
+                try:
+                    await extractor.process_message(msg)
+                except Exception as obs_exc:
+                    logger.debug(f"[exec:{execution_id}] Observation extractor error: {obs_exc}")
+
+            event_type = self._msg_to_event_type(msg)
+            payload = self._msg_to_payload(msg)
+            pending.append((msg, event_type, payload))
+
+            needs_flush = len(pending) >= self._DRAIN_BATCH_SIZE or msg.type == "approval_request"
+            if needs_flush:
+                await self._flush_pending(execution_id, pending)
+                pending.clear()
+
+        if pending:
+            await self._flush_pending(execution_id, pending)
+
+        # Observation: flush pending and close root span
+        if extractor:
+            try:
+                await extractor.flush_pending()
+            except Exception as obs_exc:
+                logger.debug(f"[exec:{execution_id}] Observation extractor flush error: {obs_exc}")
+        if root_span:
+            try:
+                await root_span.end(output={"status": "completed"})
+            except Exception:
+                pass
+
+        logger.info(f"[exec:{execution_id}] _drain_to_events finished")
+
+    async def _flush_pending(
+        self,
+        execution_id: uuid.UUID,
+        pending: list[tuple[CLIMessage, str, dict[str, Any]]],
+    ) -> None:
+        try:
+            await self._events.batch_append_events(
+                execution_id=execution_id,
+                events=[{"event_type": event_type, "payload": payload} for _, event_type, payload in pending],
+            )
+            for msg, _, payload in pending:
+                if msg.type == "approval_request":
+                    if self._auto_approve:
+                        request_id = payload.get("request_id", "")
+                        assert self._session is not None
+                        await self._session.inject_message(build_control_response(request_id, "allow"))
+                        await self._events.append_event(
+                            execution_id=execution_id,
+                            event_type=ExecutionEventType.APPROVAL_RESOLVED,
+                            payload={"decision": "auto_approved", "request_id": request_id},
+                        )
+                    else:
+                        await self._events.mark_status(execution_id=execution_id, status="approval_wait")
+                        break
+        except Exception as exc:
+            logger.warning(f"Failed to flush {len(pending)} events for {execution_id}: {exc}")
+
+    async def _finalize(
+        self,
+        execution_id: uuid.UUID,
+        result: CLIResult,
+        release: Optional[AgentRelease],
+    ) -> None:
+        status = "succeeded" if result.status == "completed" else "failed"
+
+        await self._events.complete_execution(
+            execution_id=execution_id,
+            terminal_status=status,
+            result_summary=result.usage,
+            error=result.error_payload or _build_completion_error(result.error),
+            session_id=result.session_id,
+        )
+
+        if self.callbacks:
+            try:
+                await self.callbacks.on_execution_finalized(execution_id, status, result)
+            except Exception as exc:
+                logger.warning(f"Callback on_execution_finalized failed for {execution_id}: {exc}")
+
+    async def _mark_failed(
+        self,
+        execution_id: uuid.UUID,
+        error: str,
+    ) -> None:
+        try:
+            error_payload = _build_completion_error(error[:2000])
+            await self._events.append_event(
+                execution_id=execution_id,
+                event_type=ExecutionEventType.ERROR,
+                payload=error_payload
+                or {
+                    "code": "EXECUTION_FAILED",
+                    "message": error,
+                    "data": None,
+                    "source": "runtime",
+                    "retryable": False,
+                },
+            )
+            await self._events.complete_execution(
+                execution_id=execution_id,
+                terminal_status="failed",
+                error=error_payload,
+            )
+        except Exception as exc:
+            logger.error(f"Failed to mark execution {execution_id} as failed: {exc}")
+
+        if self.callbacks:
+            try:
+                await self.callbacks.on_execution_failed(execution_id, error)
+            except Exception as exc:
+                logger.warning(f"Callback on_execution_failed failed for {execution_id}: {exc}")
+
+    async def _cleanup_container(self, container_id: str) -> None:
+        try:
+            await self.container_service.remove_container(container_id, force=True)
+        except Exception as exc:
+            logger.warning(f"Failed to cleanup container {container_id[:12]}: {exc}")
+
+    @staticmethod
+    def _msg_to_event_type(msg: CLIMessage) -> ExecutionEventType:
+        mapping = {
+            "text": ExecutionEventType.ASSISTANT_TEXT,
+            "thinking": ExecutionEventType.THINKING,
+            "tool_use": ExecutionEventType.TOOL_USE_START,
+            "tool_result": ExecutionEventType.TOOL_USE_END,
+            "error": ExecutionEventType.ERROR,
+            "artifact": ExecutionEventType.ARTIFACT_CREATED,
+            "approval_request": ExecutionEventType.APPROVAL_REQUESTED,
+        }
+        return mapping.get(msg.type) or ExecutionEventType(msg.type)
+
+    @staticmethod
+    def _msg_to_payload(msg: CLIMessage) -> dict[str, Any]:
+        if msg.type == "text":
+            return {"content": msg.content}
+        if msg.type == "thinking":
+            return {"content": msg.content}
+        if msg.type == "tool_use":
+            return {
+                "tool": {
+                    "name": msg.tool,
+                    "call_id": msg.call_id,
+                    "input": msg.input,
+                    "status": "running",
+                },
+            }
+        if msg.type == "tool_result":
+            return {
+                "call_id": msg.call_id,
+                "tool_name": msg.tool,
+                "output": msg.output,
+            }
+        if msg.type == "error":
+            return msg.error_payload or {
+                "code": "EXECUTION_FAILED",
+                "message": msg.content,
+                "data": None,
+                "source": "runtime",
+                "retryable": False,
+            }
+        if msg.type == "artifact":
+            return {"artifact": {"content": msg.content}}
+        if msg.type == "approval_request":
+            return {
+                "request_id": msg.call_id,
+                "subtype": msg.content,
+                "tool_name": msg.tool,
+                "input": msg.input,
+                "message": f"Agent wants to use: {msg.tool or 'unknown tool'}",
+            }
+        return {"content": msg.content}
+
+
+def _build_completion_error(message: str | None) -> dict[str, Any] | None:
+    if not message:
+        return None
+
+    return {
+        "code": "EXECUTION_FAILED",
+        "message": message,
+        "data": None,
+        "source": "runtime",
+        "retryable": False,
+    }
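
The runner is normally constructed behind a factory rather than by hand; `coordinator_tools.py` later in this patch imports `create_execution_runner` from `app.services.runner_factory`. A minimal invocation sketch along those lines (the prompt, timeout, and the assumption that the execution row already exists are placeholders):

```python
# Invocation sketch mirroring coordinator_tools.py below; execution_id must
# reference an already-created Execution row.
import uuid

from app.core.database import async_session_factory
from app.services.runner_factory import create_execution_runner


async def run_once(execution_id: uuid.UUID) -> None:
    async with async_session_factory() as db:
        runner = create_execution_runner(db)
        result = await runner.run(
            execution_id=execution_id,
            prompt="Summarize the repository layout.",
            timeout=600,
        )
        # CLIResult carries terminal status plus the resumable session id.
        print(result.status, result.session_id)
```
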
diff --git a/backend/app/core/agent/cli_backends/injectors.py b/backend/app/core/agent/cli_backends/injectors.py
new file mode 100644
index 000000000..6a1ca34f4
--- /dev/null
+++ b/backend/app/core/agent/cli_backends/injectors.py
@@ -0,0 +1,100 @@
+"""
+Skill and runtime config injectors for CLI agent containers.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any, Optional
+
+from loguru import logger
+
+from .container_service import CLIContainerService
+
+
+class CLISkillInjector:
+    """Writes skill definitions into the container filesystem."""
+
+    def __init__(self, container_service: CLIContainerService):
+        self.container_service = container_service
+
+    async def inject(
+        self,
+        container_id: str,
+        skills: list[dict[str, Any]],
+        target_dir: str = "/workspace/.skills",
+    ) -> None:
+        if not skills:
+            return
+        await self.container_service.exec_in_container(container_id, ["mkdir", "-p", target_dir])
+        for skill in skills:
+            name = skill.get("name", "unnamed")
+            filename = f"{target_dir}/{name}.json"
+            content = json.dumps(skill, indent=2)
+            await self.container_service.exec_in_container(
+                container_id,
+                ["sh", "-c", f"cat > {filename} << 'SKILLEOF'\n{content}\nSKILLEOF"],
+            )
+        logger.debug(f"Injected {len(skills)} skills into {container_id[:12]}")
+
+
+class RuntimeConfigInjector:
+    """Generates and writes CLAUDE.md configuration into the container."""
+
+    def __init__(self, container_service: CLIContainerService):
+        self.container_service = container_service
+
+    async def inject(
+        self,
+        container_id: str,
+        *,
+        instructions: Optional[str] = None,
+        skill_names: Optional[list[str]] = None,
+        project_context: Optional[str] = None,
+        working_dir: str = "/workspace",
+    ) -> None:
+        claude_md = self._build_claude_md(
+            instructions=instructions,
+            skill_names=skill_names,
+            project_context=project_context,
+        )
+        target = f"{working_dir}/CLAUDE.md"
+        await self.container_service.exec_in_container(
+            container_id,
+            ["sh", "-c", f"cat > {target} << 'CLAUDEEOF'\n{claude_md}\nCLAUDEEOF"],
+        )
+        logger.debug(f"Injected CLAUDE.md into {container_id[:12]}")
+
+    def _build_claude_md(
+        self,
+        *,
+        instructions: Optional[str] = None,
+        skill_names: Optional[list[str]] = None,
+        project_context: Optional[str] = None,
+    ) -> str:
+        sections: list[str] = []
+        sections.append("# Agent Configuration")
+        sections.append("")
+        sections.append("You are an autonomous coding agent executing a task.")
+        sections.append("Complete the task thoroughly and commit your work when done.")
+
+        if instructions:
+            sections.append("")
+            sections.append("## Instructions")
+            sections.append("")
+            sections.append(instructions)
+
+        if skill_names:
+            sections.append("")
+            sections.append("## Available Skills")
+            sections.append("")
+            for name in skill_names:
+                sections.append(f"- {name}")
+
+        if project_context:
+            sections.append("")
+            sections.append("## Project Context")
+            sections.append("")
+            sections.append(project_context)
+
+        return "\n".join(sections)
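
Since `_build_claude_md` is pure string assembly, its output can be derived directly from the section logic above. A worked example (calling the private method here is for illustration only):

```python
# Worked example: what RuntimeConfigInjector generates for sample inputs.
from app.core.agent.cli_backends.container_service import CLIContainerService
from app.core.agent.cli_backends.injectors import RuntimeConfigInjector

injector = RuntimeConfigInjector(CLIContainerService())
text = injector._build_claude_md(
    instructions="Prefer small, reviewable commits.",
    skill_names=["web_search"],
)
# text ==
# # Agent Configuration
#
# You are an autonomous coding agent executing a task.
# Complete the task thoroughly and commit your work when done.
#
# ## Instructions
#
# Prefer small, reviewable commits.
#
# ## Available Skills
#
# - web_search
```
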
diff --git a/backend/app/core/agent/cli_backends/openclaw.py b/backend/app/core/agent/cli_backends/openclaw.py
new file mode 100644
index 000000000..a75a43550
--- /dev/null
+++ b/backend/app/core/agent/cli_backends/openclaw.py
@@ -0,0 +1,327 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import time
+from typing import Any
+
+from loguru import logger
+
+from .base import CLIMessage, CLIResult, RuntimeSession
+from .container_bridge import ContainerProcessBridge
+
+
+class OpenClawProvider:
+    """Runtime provider for OpenClaw CLI.
+
+    OpenClaw outputs NDJSON events on *stderr* (not stdout). We spawn
+    ``openclaw agent --local --json --session-id <id> --message <prompt>``
+    and read stderr line-by-line.
+
+    Event types (from the wire protocol):
+        text, tool_use, tool_result, error, lifecycle, step_start, step_finish
+    """
+
+    provider_type = "openclaw"
+
+    def __init__(self, executable_path: str = "openclaw") -> None:
+        self.executable_path = executable_path
+        self.bridge = ContainerProcessBridge()
+
+    # ── public API ──────────────────────────────────────────────────────
+
+    async def execute(
+        self,
+        prompt: str,
+        *,
+        container_id: str,
+        cwd: str | None = None,
+        model: str | None = None,
+        timeout: int = 7200,
+        resume_session_id: str | None = None,
+        env: dict[str, str] | None = None,
+        auto_approve: bool = True,
+    ) -> RuntimeSession:
+        session_id = resume_session_id or f"joysafeter-{int(time.time() * 1e9)}"
+
+        cmd = [
+            self.executable_path,
+            "agent",
+            "--local",
+            "--json",
+            "--session-id",
+            session_id,
+        ]
+        if model:
+            cmd.extend(["--model", model])
+        cmd.extend(["--message", prompt])
+
+        process = await self.bridge.exec_streaming(
+            container_id,
+            cmd,
+            env=env,
+            workdir=cwd,
+        )
+
+        queue: asyncio.Queue[CLIMessage | None] = asyncio.Queue(maxsize=512)
+        loop = asyncio.get_event_loop()
+        result_future: asyncio.Future[CLIResult] = loop.create_future()
+
+        drain_task = asyncio.create_task(
+            self._drain(process, queue, result_future, session_id, timeout),
+            name=f"openclaw-drain-{container_id[:12]}",
+        )
+
+        async def inject(message: str) -> None:
+            if process.stdin and not process.stdin.is_closing():
+                process.stdin.write(f"{message}\n".encode())
+                await process.stdin.drain()
+
+        async def cancel() -> None:
+            process.terminate()
+
+        return RuntimeSession(
+            messages=queue,
+            result=result_future,
+            _inject_fn=inject,
+            _cancel_fn=cancel,
+            _drain_task=drain_task,
+        )
+
+    # ── drain loop ──────────────────────────────────────────────────────
+
+    async def _drain(
+        self,
+        process: asyncio.subprocess.Process,
+        queue: asyncio.Queue[CLIMessage | None],
+        result_future: asyncio.Future[CLIResult],
+        session_id: str,
+        timeout: int,
+    ) -> None:
+        accumulated_text: list[str] = []
+        final_status = "completed"
+        final_error = ""
+        final_error_payload: dict[str, Any] | None = None
+
+        try:
+            async with asyncio.timeout(timeout):
+                # OpenClaw writes JSON events to stderr
+                assert process.stderr is not None
+                async for raw_line in process.stderr:
+                    line = raw_line.decode().strip()
+                    if not line:
+                        continue
+
+                    for msg in self._parse_line(line):
+                        if msg.type == "text":
+                            accumulated_text.append(msg.content)
+                        if msg.type == "error":
+                            final_status = "failed"
+                            final_error = msg.content
+                            final_error_payload = msg.error_payload
+                        await queue.put(msg)
+
+        except TimeoutError:
+            if not result_future.done():
+                result_future.set_result(
+                    CLIResult(
+                        status="timeout",
+                        error="OpenClaw agent timed out",
+                        error_payload={
+                            "code": "OPENCLAW_AGENT_TIMEOUT",
+                            "message": "OpenClaw agent timed out",
+                            "data": {"session_id": session_id},
+                            "source": "runtime",
+                            "retryable": True,
+                        },
+                        session_id=session_id,
+                    )
+                )
+        except Exception as e:
+            logger.error(f"OpenClaw drain error: {e}")
+            if not result_future.done():
+                result_future.set_result(
+                    CLIResult(
+                        status="failed",
+                        error=str(e),
+                        error_payload={
+                            "code": "OPENCLAW_AGENT_DRAIN_FAILED",
+                            "message": str(e),
+                            "data": {"session_id": session_id},
+                            "source": "runtime",
+                            "retryable": False,
+                        },
+                        session_id=session_id,
+                    )
+                )
+        finally:
+            if not result_future.done():
+                exit_code = await process.wait()
+                if final_status == "failed":
+                    result_future.set_result(
+                        CLIResult(
+                            status="failed",
+                            output="\n".join(accumulated_text),
+                            error=final_error,
+                            error_payload=final_error_payload,
+                            session_id=session_id,
+                        )
+                    )
+                elif exit_code == 0 or accumulated_text:
+                    result_future.set_result(
+                        CLIResult(
+                            status="completed",
+                            output="\n".join(accumulated_text),
+                            session_id=session_id,
+                        )
+                    )
+                else:
+                    stdout_bytes = await process.stdout.read() if process.stdout else b""
+                    result_future.set_result(
+                        CLIResult(
+                            status="failed",
+                            error=f"Exit code {exit_code}: {stdout_bytes.decode()[:2000]}",
+                            error_payload={
+                                "code": "OPENCLAW_AGENT_EXIT_FAILED",
+                                "message": f"Exit code {exit_code}: {stdout_bytes.decode()[:2000]}",
+                                "data": {"session_id": session_id, "exit_code": exit_code},
+                                "source": "runtime",
+                                "retryable": False,
+                            },
+                            session_id=session_id,
+                        )
+                    )
+            await queue.put(None)
+
+    # ── event parsing (testable without Docker) ─────────────────────────
+
+    def _parse_line(self, line: str) -> list[CLIMessage]:
+        """Parse a single stderr line into CLIMessage list.
+
+        Non-JSON lines (plain log output) are silently skipped.
+        """
+        if not line or line[0] != "{":
+            return []
+
+        try:
+            event = json.loads(line)
+        except json.JSONDecodeError:
+            return []
+
+        if not isinstance(event, dict) or "type" not in event:
+            return []
+
+        return self._parse_event(event)
+
+    def _parse_event(self, event: dict) -> list[CLIMessage]:
+        """Map an OpenClaw NDJSON event to CLIMessage(s)."""
+        event_type = event.get("type", "")
+
+        if event_type == "text":
+            text = event.get("text", "")
+            if text:
+                return [CLIMessage(type="text", content=text)]
+            return []
+
+        if event_type == "tool_use":
+            input_data = event.get("input")
+            if isinstance(input_data, str):
+                try:
+                    input_data = json.loads(input_data)
+                except json.JSONDecodeError:
+                    input_data = None
+            return [
+                CLIMessage(
+                    type="tool_use",
+                    tool=event.get("tool", ""),
+                    call_id=event.get("callId", ""),
+                    input=input_data if isinstance(input_data, dict) else None,
+                )
+            ]
+
+        if event_type == "tool_result":
+            return [
+                CLIMessage(
+                    type="tool_result",
+                    tool=event.get("tool", ""),
+                    call_id=event.get("callId", ""),
+                    output=event.get("text", ""),
+                )
+            ]
+
+        if event_type == "error":
+            error_payload = _extract_error_payload(event)
+            return [CLIMessage(type="error", content=error_payload["message"], error_payload=error_payload)]
+
+        if event_type == "lifecycle":
+            phase = event.get("phase", "")
+            if phase in ("error", "failed", "cancelled"):
+                error_payload = _extract_error_payload(event)
+                return [CLIMessage(type="error", content=error_payload["message"], error_payload=error_payload)]
+            return []
+
+        if event_type == "step_start":
+            return [CLIMessage(type="text", content="[step started]")]
+
+        if event_type == "step_finish":
+            return [CLIMessage(type="text", content="[step finished]")]
+
+        # Unknown event type — skip
+        return []
+
+
+def _extract_error_payload(event: dict) -> dict[str, Any]:
+    """Extract canonical error payload from an OpenClaw event."""
+    err_obj = event.get("error")
+    if isinstance(err_obj, dict):
+        code = err_obj.get("code") or event.get("code") or "OPENCLAW_AGENT_ERROR"
+        data = err_obj.get("data") if isinstance(err_obj.get("data"), dict) else None
+        if err_obj.get("message"):
+            return {
+                "code": str(code),
+                "message": str(err_obj["message"]),
+                "data": data,
+                "source": "runtime",
+                "retryable": False,
+            }
+        if isinstance(data, dict) and data.get("message"):
+            return {
+                "code": str(code),
+                "message": str(data["message"]),
+                "data": data,
+                "source": "runtime",
+                "retryable": False,
+            }
+        if err_obj.get("name"):
+            return {
+                "code": str(code),
+                "message": str(err_obj["name"]),
+                "data": data,
+                "source": "runtime",
+                "retryable": False,
+            }
+
+    if event.get("text"):
+        return {
+            "code": str(event.get("code") or "OPENCLAW_AGENT_ERROR"),
+            "message": str(event["text"]),
+            "data": None,
+            "source": "runtime",
+            "retryable": False,
+        }
+    if event.get("message"):
+        return {
+            "code": str(event.get("code") or "OPENCLAW_AGENT_ERROR"),
+            "message": str(event["message"]),
+            "data": None,
+            "source": "runtime",
+            "retryable": False,
+        }
+
+    return {
+        "code": str(event.get("code") or "OPENCLAW_AGENT_ERROR"),
+        "message": "unknown openclaw error",
+        "data": None,
+        "source": "runtime",
+        "retryable": False,
+    }
diff --git a/backend/app/core/agent/cli_backends/registry.py b/backend/app/core/agent/cli_backends/registry.py
new file mode 100644
index 000000000..8363304ee
--- /dev/null
+++ b/backend/app/core/agent/cli_backends/registry.py
@@ -0,0 +1,41 @@
+from __future__ import annotations
+
+from loguru import logger
+
+from app.common.app_errors import NotFoundError
+
+from .base import RuntimeProvider
+
+
+class RuntimeProviderRegistry:
+    def __init__(self) -> None:
+        self._providers: dict[str, RuntimeProvider] = {}
+
+    def register(self, provider: RuntimeProvider) -> None:
+        self._providers[provider.provider_type] = provider
+        logger.info(f"Registered runtime provider: {provider.provider_type}")
+
+    def get(self, provider_type: str) -> RuntimeProvider:
+        if provider_type not in self._providers:
+            raise NotFoundError(
+                "Runtime provider not found",
+                code="RUNTIME_PROVIDER_NOT_FOUND",
+                data={"provider_type": provider_type, "available_providers": list(self._providers.keys())},
+            )
+        return self._providers[provider_type]
+
+    def list_providers(self) -> list[str]:
+        return list(self._providers.keys())
+
+
+runtime_registry = RuntimeProviderRegistry()
+
+
+def init_providers() -> None:
+    from .claude_code import ClaudeCodeProvider
+    from .codex import CodexProvider
+    from .openclaw import OpenClawProvider
+
+    runtime_registry.register(ClaudeCodeProvider())
+    runtime_registry.register(CodexProvider())
+    runtime_registry.register(OpenClawProvider())
diff --git a/backend/app/core/agent/cli_backends/runner_callbacks.py b/backend/app/core/agent/cli_backends/runner_callbacks.py
new file mode 100644
index 000000000..d81c9a98a
--- /dev/null
+++ b/backend/app/core/agent/cli_backends/runner_callbacks.py
@@ -0,0 +1,35 @@
+"""
+Callback protocol for ExecutionRunner — breaks circular dependency.
+
+ExecutionRunner calls these hooks after finalize/failure.
+The concrete implementation lives in ExecutionLifecycleService.
+"""
+
+from __future__ import annotations
+
+import uuid
+from typing import Protocol, runtime_checkable
+
+from app.core.agent.cli_backends.base import CLIResult
+
+
+@runtime_checkable
+class RunnerCallbacks(Protocol):
+    """Interface that ExecutionRunner uses to notify lifecycle events."""
+
+    async def on_execution_finalized(
+        self,
+        execution_id: uuid.UUID,
+        status: str,
+        result: CLIResult,
+    ) -> None:
+        """Called after execution reaches terminal state (completed/failed)."""
+        ...
+
+    async def on_execution_failed(
+        self,
+        execution_id: uuid.UUID,
+        error: str,
+    ) -> None:
+        """Called when runner catches an unhandled exception."""
+        ...
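
Because `RunnerCallbacks` is a `runtime_checkable` `Protocol`, any structurally matching class conforms without importing the runner. A minimal sketch of a conforming implementation (the `LoggingCallbacks` class is hypothetical; per the docstring, the real one lives in `ExecutionLifecycleService`):

```python
# Hypothetical RunnerCallbacks implementation that just logs outcomes.
import uuid

from loguru import logger

from app.core.agent.cli_backends.base import CLIResult
from app.core.agent.cli_backends.runner_callbacks import RunnerCallbacks


class LoggingCallbacks:
    async def on_execution_finalized(self, execution_id: uuid.UUID, status: str, result: CLIResult) -> None:
        logger.info(f"[exec:{execution_id}] finalized: status={status}, session={result.session_id}")

    async def on_execution_failed(self, execution_id: uuid.UUID, error: str) -> None:
        logger.error(f"[exec:{execution_id}] failed: {error}")


# Structural typing: no inheritance needed; isinstance works via runtime_checkable.
assert isinstance(LoggingCallbacks(), RunnerCallbacks)
```
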
diff --git a/backend/app/core/agent/cli_backends/session_registry.py b/backend/app/core/agent/cli_backends/session_registry.py
new file mode 100644
index 000000000..b3b92cd1a
--- /dev/null
+++ b/backend/app/core/agent/cli_backends/session_registry.py
@@ -0,0 +1,28 @@
+"""In-memory registry of active RuntimeSessions, keyed by execution_id."""
+
+from __future__ import annotations
+
+import uuid
+from typing import Optional
+
+from .base import RuntimeSession
+
+
+class SessionRegistry:
+    """Thread-safe registry of active execution sessions."""
+
+    def __init__(self) -> None:
+        self._sessions: dict[uuid.UUID, RuntimeSession] = {}
+
+    def register(self, execution_id: uuid.UUID, session: RuntimeSession) -> None:
+        self._sessions[execution_id] = session
+
+    def get(self, execution_id: uuid.UUID) -> Optional[RuntimeSession]:
+        return self._sessions.get(execution_id)
+
+    def unregister(self, execution_id: uuid.UUID) -> None:
+        self._sessions.pop(execution_id, None)
+
+
+# Module-level singleton
+session_registry = SessionRegistry()
diff --git a/backend/app/core/agent/code_agent/executor/docker_executor.py b/backend/app/core/agent/code_agent/executor/docker_executor.py
index 29b5525cd..bb267c0bf 100644
--- a/backend/app/core/agent/code_agent/executor/docker_executor.py
+++ b/backend/app/core/agent/code_agent/executor/docker_executor.py
@@ -13,6 +13,8 @@
 
 from loguru import logger
 
+from app.common.app_errors import ServiceUnavailableError
+
 from .base import CodeOutput, PythonExecutor
 
 
@@ -103,13 +105,18 @@ def _get_backend(self):
         except ImportError as e:
             logger.error(f"Failed to import PydanticSandboxAdapter: {e}")
-            raise RuntimeError(
-                "PydanticSandboxAdapter is required for DockerPythonExecutor. "
-                "Please ensure pydantic-ai-backend[docker] is installed."
+            raise ServiceUnavailableError(
+                "PydanticSandboxAdapter is required for DockerPythonExecutor",
+                code="DOCKER_EXECUTOR_DEPENDENCY_MISSING",
+                data={"dependency": "pydantic-ai-backend[docker]"},
             ) from e
         except Exception as e:
             logger.error(f"Failed to create PydanticSandboxAdapter: {e}")
-            raise RuntimeError(f"Failed to initialize Docker backend: {e}") from e
+            raise ServiceUnavailableError(
+                "Failed to initialize Docker backend",
+                code="DOCKER_EXECUTOR_INIT_FAILED",
+                data={"detail": str(e)},
+            ) from e
 
         return self._backend
diff --git a/backend/app/core/agent/code_agent/interpreter/ast_evaluator.py b/backend/app/core/agent/code_agent/interpreter/ast_evaluator.py
index 93b63ca66..d68063416 100644
--- a/backend/app/core/agent/code_agent/interpreter/ast_evaluator.py
+++ b/backend/app/core/agent/code_agent/interpreter/ast_evaluator.py
@@ -1335,13 +1335,6 @@ def evaluate_ast(
     elif isinstance(expression, ast.Delete):
         return evaluate_delete(expression, *common_params)
 
-    # Python 3.8 Index node (backward compatibility)
-    elif hasattr(ast, "Index") and isinstance(expression, ast.Index):
-        value = getattr(expression, "value", None)
-        if value is not None:
-            return evaluate_ast(value, *common_params)
-        raise InterpreterError("Index node has no value attribute")
-
     else:
         raise InterpreterError(f"{expression.__class__.__name__} is not supported.")
diff --git a/backend/app/core/agent/code_agent/utils.py b/backend/app/core/agent/code_agent/utils.py
index e60b3d800..b4e96a51d 100644
--- a/backend/app/core/agent/code_agent/utils.py
+++ b/backend/app/core/agent/code_agent/utils.py
@@ -18,6 +18,8 @@
 
 from loguru import logger
 
+from app.common.app_errors import InternalServiceError
+
 __all__ = [
     "RateLimiter",
     "Retrying",
@@ -328,7 +330,11 @@ async def __call__(
         # Should not reach here, but just in case
         if last_exception:
             raise last_exception
-        raise RuntimeError("Unexpected state in retry logic")
+        raise InternalServiceError(
+            "Unexpected state in async retry logic",
+            code="RETRY_STATE_INVALID",
+            data={"mode": "async"},
+        )
 
     def call_sync(
         self,
@@ -376,7 +382,11 @@ def call_sync(
         if last_exception:
             raise last_exception
-        raise RuntimeError("Unexpected state in retry logic")
+        raise InternalServiceError(
+            "Unexpected state in sync retry logic",
+            code="RETRY_STATE_INVALID",
+            data={"mode": "sync"},
+        )
 
     def wrap(self, func: Callable[..., T]) -> Callable[..., T]:
         """
diff --git a/backend/app/core/agent/coordinator_tools.py b/backend/app/core/agent/coordinator_tools.py
new file mode 100644
index 000000000..ca3917dbe
--- /dev/null
+++ b/backend/app/core/agent/coordinator_tools.py
@@ -0,0 +1,217 @@
+"""LangGraph tools for Coordinator agents to spawn and manage CLI agents."""
+
+from __future__ import annotations
+
+import asyncio
+import uuid
+
+from loguru import logger
+from sqlalchemy import select
+
+from app.core.agent.cli_backends.base import CLIResult
+from app.core.database import async_session_factory
+from app.models.execution import Execution
+from app.services.execution_orchestrator import ExecutionOrchestrator
+from app.utils.safe_task import safe_create_task
+
+# Execution status string literals
+EXECUTION_STATUS_COMPLETED = "succeeded"
+EXECUTION_STATUS_FAILED = "failed"
+
+
+async def spawn_agent(
+    agent_name: str,
+    prompt: str,
+    *,
+    workspace_id: str,
+    user_id: str,
+    parent_execution_id: str,
+    runtime_type: str = "claude_code",
+    model: str | None = None,
+    wait: bool = True,
+    timeout: int = 3600,
+) -> dict:
+    """
+    Spawn a CLI agent to execute a sub-task.
+
+    Args:
+        agent_name: Display name for the spawned agent
+        prompt: Task description for the agent
+        workspace_id: Workspace context
+        user_id: User who initiated the parent execution
+        parent_execution_id: The coordinator's execution ID
+        runtime_type: CLI type ("claude_code", "codex", "openclaw")
+        model: Optional model override
+        wait: If True, wait for completion and return result
+        timeout: Max wait time in seconds
+
+    Returns:
+        dict with execution_id, status, and output (if wait=True)
+    """
+    ws_id = uuid.UUID(workspace_id)
+    parent_id = uuid.UUID(parent_execution_id)
+
+    async with async_session_factory() as db:
+        from app.models.agent_run import AgentRun
+
+        parent_identity = (
+            await db.execute(
+                select(AgentRun.release_id, AgentRun.agent_version_id)
+                .join(Execution, AgentRun.id == Execution.run_id)
+                .where(Execution.id == parent_id)
+            )
+        ).one_or_none()
+        if not parent_identity:
+            raise ValueError(f"Parent execution {parent_id} not found")
+
+        parent_release = parent_identity[0]
+        parent_version = None if parent_release else parent_identity[1]
+
+        run = AgentRun(
+            release_id=parent_release,
+            agent_version_id=parent_version,
+            workspace_id=ws_id,
+            trigger_medium="system",
+            run_purpose="production",
+            goal=f"[Sub] {agent_name}: {prompt[:80]}",
+            status="pending",
+            created_by=user_id,
+        )
+        db.add(run)
+        await db.flush()
+
+        from app.services.execution_service import ExecutionService
+
+        svc = ExecutionService(db)
+        execution = await svc.create_execution(
+            run_id=run.id,
+            runtime_type=runtime_type,
+            parent_execution_id=parent_id,
+        )
+        run.current_execution_id = execution.id
+        await db.commit()
+        exec_id = execution.id
+
+        await ExecutionOrchestrator.publish_run_status_change(
+            db,
+            run,
+            execution_id=execution.id,
+            target_status="running",
+        )
+
+    logger.info(f"Coordinator spawned {agent_name} ({runtime_type}) -> execution {exec_id}")
+
+    if wait:
+        return await _run_and_wait(exec_id, prompt, ws_id, user_id, runtime_type, model, timeout, agent_name)
+    else:
+        _fire_and_forget(exec_id, prompt, ws_id, user_id, runtime_type, model)
+        return {
+            "execution_id": str(exec_id),
+            "status": "dispatched",
+            "output": "",
+        }
+
+
+async def _run_and_wait(
+    exec_id: uuid.UUID,
+    prompt: str,
+    ws_id: uuid.UUID,
+    user_id: str,
+    runtime_type: str,
+    model: str | None,
+    timeout: int,
+    agent_name: str,
+) -> dict:
+    """Run the execution synchronously and return the result."""
+    try:
+        async with async_session_factory() as db:
+            from app.services.runner_factory import create_execution_runner
+
+            runner = create_execution_runner(db)
+            result: CLIResult = await asyncio.wait_for(
+                runner.run(
+                    execution_id=exec_id,
+                    prompt=prompt,
+                    model=model,
+                    timeout=timeout,
+                ),
+                timeout=timeout,
+            )
+            return {
+                "execution_id": str(exec_id),
+                "status": result.status,
+                "output": result.output[:5000],
+                "session_id": result.session_id,
+            }
+    except asyncio.TimeoutError:
+        return {
+            "execution_id": str(exec_id),
+            "status": "timeout",
+            "output": f"Agent '{agent_name}' timed out after {timeout}s",
+        }
+    except Exception as e:
+        logger.error(f"spawn_agent error for {exec_id}: {e}")
+        return {
+            "execution_id": str(exec_id),
+            "status": "failed",
+            "output": str(e)[:2000],
+        }
+
+
+def _fire_and_forget(
+    exec_id: uuid.UUID,
+    prompt: str,
+    ws_id: uuid.UUID,
+    user_id: str,
+    runtime_type: str,
+    model: str | None,
+) -> None:
+    """Dispatch execution as a background task."""
+
+    async def _background():
+        async with async_session_factory() as db:
+            from app.services.runner_factory import create_execution_runner
+
+            runner = create_execution_runner(db)
+            await runner.run(execution_id=exec_id, prompt=prompt, model=model)
+
+    safe_create_task(
+        _background(),
+        name=f"coordinator-child-{exec_id}",
+    )
+
+
+async def get_agent_result(execution_id: str, *, user_id: str) -> dict:
+    """
+    Get the result of a previously spawned agent.
+
+    Args:
+        execution_id: The execution ID returned by spawn_agent
+        user_id: The user who owns the execution
+
+    Returns:
+        dict with status and output
+    """
+    exec_id = uuid.UUID(execution_id)
+
+    async with async_session_factory() as db:
+        from app.services.execution_service import ExecutionService
+
+        svc = ExecutionService(db)
+        execution = await svc.get_execution(exec_id, user_id)
+
+        if not execution:
+            return {"status": "not_found", "output": ""}
+
+        status = execution.status.value if hasattr(execution.status, "value") else str(execution.status)
+
+        if status == EXECUTION_STATUS_COMPLETED:
+            output = ""
+            if execution.metrics:
+                output = execution.metrics.get("output", "")
+            return {"status": "succeeded", "output": output}
+        elif status == EXECUTION_STATUS_FAILED:
+            error = execution.error or {}
+            return {"status": "failed", "output": error.get("message") or "Unknown error"}
+        else:
+            return {"status": status, "output": f"Agent is still {status}"}
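
A coordinator-side sketch of the dispatch-then-poll pattern these tools enable. The IDs are placeholders, and a real caller would keep polling until the status is terminal:

```python
# Usage sketch for the coordinator tools above; IDs are placeholders.
from app.core.agent.coordinator_tools import get_agent_result, spawn_agent


async def delegate(workspace_id: str, user_id: str, parent_execution_id: str) -> str:
    # Fire-and-forget dispatch; returns immediately with status="dispatched".
    handle = await spawn_agent(
        "docs-writer",
        "Write a CHANGELOG entry for the current branch.",
        workspace_id=workspace_id,
        user_id=user_id,
        parent_execution_id=parent_execution_id,
        runtime_type="claude_code",
        wait=False,
    )
    # ...later, check on the child execution.
    status = await get_agent_result(handle["execution_id"], user_id=user_id)
    return status["output"]
```
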
diff --git a/backend/app/core/agent/langfuse_callback.py b/backend/app/core/agent/langfuse_callback.py
deleted file mode 100644
index 08a5c14ee..000000000
--- a/backend/app/core/agent/langfuse_callback.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Langfuse callback handler for LLM observability.
-
-Integrates Langfuse tracing with LangChain/LangGraph agents to track:
-- LLM calls (prompts, responses, tokens, costs)
-- Tool calls and results
-- Agent execution traces
-- User interactions
-"""
-
-import os
-from typing import Any
-
-from loguru import logger
-
-try:
-    from langfuse.langchain import CallbackHandler as LangfuseCallbackHandler
-
-    LANGFUSE_AVAILABLE = True
-except ImportError:
-    LANGFUSE_AVAILABLE = False
-    logger.warning("langfuse not installed. Langfuse tracing will be disabled.")
-
-
-def get_langfuse_callbacks(enabled: bool = True, **kwargs: Any) -> list[Any]:
-    """
-    Get list of Langfuse callbacks for use with LangChain/LangGraph.
-
-    Environment variables are automatically read from .env:
-    - LANGFUSE_PUBLIC_KEY
-    - LANGFUSE_SECRET_KEY
-    - LANGFUSE_HOST (optional, defaults to https://cloud.langfuse.com)
-
-    Returns a list that can be used in two ways:
-    1. Via with_config: runnable.with_config({"callbacks": [...]})
-    2. Via invoke: agent.invoke(..., config={"callbacks": [...]})
-
-    Example:
-        # Simple usage - environment variables from .env
-        langfuse_handler = CallbackHandler()
-        config = {
-            "callbacks": [langfuse_handler],
-            "configurable": {...}
-        }
-        result = graph.astream(input=initial_state, config=config)
-
-    Args:
-        enabled: Whether to enable Langfuse tracing
-        **kwargs: Additional arguments (for backward compatibility, but not used)
-
-    Returns:
-        List of callback handlers (empty list if disabled or unavailable)
-    """
-    if not enabled:
-        logger.debug("[langfuse] Langfuse tracing is disabled")
-        return []
-
-    if not LANGFUSE_AVAILABLE:
-        logger.warning("[langfuse] Langfuse package not installed, skipping callback creation")
-        return []
-
-    # Check if environment variables are set
-    public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
-    secret_key = os.getenv("LANGFUSE_SECRET_KEY")
-    host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
-
-    # Print configuration (mask sensitive keys)
-    def _mask_key(k):
-        return f"{k[:8]}...{k[-4:]}" if k and len(k) > 12 else "***" if k else None
-
-    logger.info(
-        f"[langfuse] Configuration: enabled={enabled}, "
-        f"public_key={_mask_key(public_key)}, "
-        f"secret_key={'***' if secret_key else None}, "
-        f"host={host}"
-    )
-
-    if not public_key or not secret_key:
-        logger.warning(
-            "[langfuse] Langfuse keys not found in environment variables. "
-            "Please set LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY in .env file"
-        )
-        return []
-
-    try:
-        # Create handler with trace_id from context for cross-system correlation
-        from app.core.trace_context import get_trace_id
-
-        trace_id = get_trace_id()
-        handler = LangfuseCallbackHandler(
-            trace_context={"trace_id": trace_id} if trace_id else None,
-        )
-        logger.info(f"[langfuse] Langfuse callback handler created successfully (host: {host})")
-        return [handler]
-    except Exception as e:
-        logger.error(f"[langfuse] Failed to create Langfuse callback handler: {e}")
-        return []
diff --git a/backend/app/core/agent/memory/manager.py b/backend/app/core/agent/memory/manager.py
index b76872031..4c5f0496d 100644
--- a/backend/app/core/agent/memory/manager.py
+++ b/backend/app/core/agent/memory/manager.py
@@ -1,6 +1,8 @@
+from __future__ import annotations
+
 from dataclasses import dataclass
 from textwrap import dedent
-from typing import Any, Callable, Dict, List, Literal, Optional, Type, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Type, Union
 
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages.chat import ChatMessage as Message
@@ -17,11 +19,19 @@
 from app.core.constants import DEFAULT_USER_ID
 from app.core.tools.tool import EnhancedTool
 from app.schemas.memory import UserMemory
-from app.services.memory_service import MemoryService
 from app.utils.datetime import utc_now
 from app.utils.prompts import get_json_output_prompt
 from app.utils.string import parse_response_model_str
 
+if TYPE_CHECKING:
+    from app.services.memory_service import MemoryService
+
+
+def _is_memory_service(obj: Any) -> bool:
+    from app.services.memory_service import MemoryService as _MS  # cached by Python's import system
+
+    return isinstance(obj, _MS)
+
 
 class MemorySearchResponse(BaseModel):
     """Model for Memory Search Response."""
@@ -140,7 +150,7 @@ def read_from_db(self, user_id: Optional[str] = None):
 
     async def aread_from_db(self, user_id: Optional[str] = None):
         if self.db:
-            if isinstance(self.db, MemoryService):
+            if _is_memory_service(self.db):
                 # If no user_id is provided, read all memories
                 if user_id is None:
                     all_memories: List[UserMemory] = await self.db.get_user_memories()  # type: ignore
@@ -394,9 +404,9 @@ def clear(self) -> None:
         if self.db:
             result = self.db.clear_memories()
             if hasattr(result, "__await__"):
-                import asyncio
+                from app.utils.safe_task import safe_create_task
 
-                asyncio.create_task(result)  # type: ignore[unused-coroutine]
+                safe_create_task(result, name="memory-clear")  # type: ignore[arg-type]
 
     def delete_user_memory(
         self,
@@ -431,7 +441,7 @@ def clear_user_memories(self, user_id: Optional[str] = None) -> None:
             logger.warning("Memory DB not provided.")
             return
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             raise ValueError(
                 "clear_user_memories() is not supported with an async DB. Please use aclear_user_memories() instead."
             )
@@ -447,7 +457,7 @@ def clear_user_memories(self, user_id: Optional[str] = None) -> None:
 
         if memory_ids:
             # Delete all memories in a single batch operation
-            self.db.delete_user_memories(memory_ids=memory_ids, user_id=user_id)
+            self.db.delete_user_memories(memory_ids=memory_ids, user_id=user_id)  # type: ignore[unused-coroutine]
             logger.debug(f"Cleared {len(memory_ids)} memories for user {user_id}")
 
     async def aclear_user_memories(self, user_id: Optional[str] = None) -> None:
@@ -463,7 +473,7 @@ async def aclear_user_memories(self, user_id: Optional[str] = None) -> None:
             logger.warning("Memory DB not provided.")
             return
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             memories = await self.aget_user_memories(user_id=user_id)
         else:
             memories = self.get_user_memories(user_id=user_id)
@@ -477,10 +487,10 @@ async def aclear_user_memories(self, user_id: Optional[str] = None) -> None:
 
         if memory_ids:
             # Delete all memories in a single batch operation
-            if isinstance(self.db, MemoryService):
+            if _is_memory_service(self.db):
                 await self.db.delete_user_memories(memory_ids=memory_ids, user_id=user_id)
             else:
-                self.db.delete_user_memories(memory_ids=memory_ids, user_id=user_id)
+                self.db.delete_user_memories(memory_ids=memory_ids, user_id=user_id)  # type: ignore[unused-coroutine]
             logger.debug(f"Cleared {len(memory_ids)} memories for user {user_id}")
 
     # -*- Agent Functions
@@ -499,7 +509,7 @@ def create_user_memories(
             logger.warning("MemoryDb not provided.")
             return "Please provide a db to store memories"
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             raise ValueError(
                 "create_user_memories() is not supported with an async DB. Please use acreate_user_memories() instead."
             )
@@ -567,7 +577,7 @@ async def acreate_user_memories(
         if user_id is None:
             user_id = "default"
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             memories = await self.aread_from_db(user_id=user_id)
         else:
             memories = self.read_from_db(user_id=user_id)
@@ -589,7 +599,7 @@
         )
 
         # We refresh from the DB
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             memories = await self.aread_from_db(user_id=user_id)
         else:
             memories = self.read_from_db(user_id=user_id)
@@ -603,7 +613,7 @@ def update_memory_task(self, task: str, user_id: Optional[str] = None) -> str:
             logger.warning("MemoryDb not provided.")
             return "Please provide a db to store memories"
 
-        if not isinstance(self.db, MemoryService):
+        if not _is_memory_service(self.db):
             raise ValueError(
                 "update_memory_task() is not supported with an async DB. Please use aupdate_memory_task() instead."
            )
@@ -645,7 +655,7 @@ async def aupdate_memory_task(self, task: str, user_id: Optional[str] = None) ->
         if user_id is None:
             user_id = "default"
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             memories = await self.aread_from_db(user_id=user_id)
         else:
             memories = self.read_from_db(user_id=user_id)
@@ -668,7 +678,7 @@ async def aupdate_memory_task(self, task: str, user_id: Optional[str] = None) ->
         )
 
         # We refresh from the DB
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             await self.aread_from_db(user_id=user_id)
         else:
             self.read_from_db(user_id=user_id)
@@ -683,13 +693,13 @@ def _upsert_db_memory(self, memory: UserMemory) -> str:
                 raise ValueError("Memory db not initialized")
             result = self.db.upsert_user_memory(memory=memory)
             if hasattr(result, "__await__"):
-                import asyncio
+                from app.utils.safe_task import safe_create_task
 
-                asyncio.create_task(result)  # type: ignore[unused-coroutine]
-            return "Memory added successfully"
+                safe_create_task(result, name="memory-upsert")  # type: ignore[arg-type]
         except Exception as e:
             logger.warning(f"Error storing memory in db: {e}")
             return f"Error adding memory: {e}"
+        return ""
 
     def _delete_db_memory(self, memory_id: str, user_id: Optional[str] = None) -> str:
         """Use this function to delete a memory from the database."""
@@ -702,13 +712,13 @@ def _delete_db_memory(self, memory_id: str, user_id: Optional[str] = None) -> st
 
             result = self.db.delete_user_memory(memory_id=memory_id, user_id=user_id)
             if hasattr(result, "__await__"):
-                import asyncio
+                from app.utils.safe_task import safe_create_task
 
-                asyncio.create_task(result)  # type: ignore[unused-coroutine]
-            return "Memory deleted successfully"
+                safe_create_task(result, name="memory-delete")  # type: ignore[arg-type]
         except Exception as e:
             logger.warning(f"Error deleting memory in db: {e}")
             return f"Error deleting memory: {e}"
+        return ""
 
     # -*- Utility Functions
     def search_user_memories(
@@ -1104,7 +1114,7 @@ def optimize_memories(
         if user_id is None:
             user_id = "default"
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             raise ValueError(
                 "optimize_memories() is not supported with an async DB. Please use aoptimize_memories() instead."
             )
@@ -1145,7 +1155,7 @@ def optimize_memories(
 
         opt_mem.memory_id = str(uuid4())
 
-        self.db.upsert_user_memory(memory=opt_mem)
+        self.db.upsert_user_memory(memory=opt_mem)  # type: ignore[unused-coroutine]
 
         optimized_tokens = strategy_instance.count_tokens(optimized_memories)
         logger.debug(f"Optimization complete. New token count: {optimized_tokens}")
@@ -1176,7 +1186,7 @@ async def aoptimize_memories(
             user_id = "default"
 
         # Get user memories - handle both sync and async DBs
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             memories = await self.aget_user_memories(user_id=user_id)
         else:
             memories = self.get_user_memories(user_id=user_id)
@@ -1215,10 +1225,10 @@ async def aoptimize_memories(
 
         opt_mem.memory_id = str(uuid4())
 
-        if isinstance(self.db, MemoryService):
+        if _is_memory_service(self.db):
             await self.db.upsert_user_memory(memory=opt_mem)
-        elif isinstance(self.db, MemoryService):
-            self.db.upsert_user_memory(memory=opt_mem)
+        else:
+            self.db.upsert_user_memory(memory=opt_mem)  # type: ignore[unused-coroutine]
 
         optimized_tokens = strategy_instance.count_tokens(optimized_memories)
         logger.debug(f"Memory optimization complete. New token count: {optimized_tokens}")
@@ -1458,7 +1468,7 @@ async def acreate_or_update_memories(
             # and LangChain models are thread-safe
             model_copy = self.model
         # Update the Model (set defaults, add logit etc.)
-        if isinstance(db, MemoryService):
+        if _is_memory_service(db):
             _tools = self.determine_tools_for_model(
                 await self._aget_db_tools(
                     user_id,
@@ -1668,7 +1678,7 @@ async def arun_memory_task(
             # and LangChain models are thread-safe
             model_copy = self.model
         # Update the Model (set defaults, add logit etc.)
-        if isinstance(db, MemoryService):
+        if _is_memory_service(db):
             _tools = self.determine_tools_for_model(
                 await self._aget_db_tools(
                     user_id,
@@ -1952,7 +1962,7 @@ async def add_memory(memory: str, topics: Optional[List[str]] = None) -> str:
         try:
             memory_id = str(uuid4())
 
-            if isinstance(db, MemoryService):
+            if _is_memory_service(db):
                 await db.upsert_user_memory(
                     UserMemory(
                         memory_id=memory_id,
@@ -1965,7 +1975,7 @@ async def add_memory(memory: str, topics: Optional[List[str]] = None) -> str:
                     )
                 )
             else:
-                db.upsert_user_memory(
+                db.upsert_user_memory(  # type: ignore[unused-coroutine]
                     UserMemory(
                         memory_id=memory_id,
                         user_id=user_id,
@@ -1997,7 +2007,7 @@ async def update_memory(memory_id: str, memory: str, topics: Optional[List[str]]
                 return "Can't update memory with empty string. Use the delete memory function if available."
 
             try:
-                if isinstance(db, MemoryService):
+                if _is_memory_service(db):
                     await db.upsert_user_memory(
                         UserMemory(
                             memory_id=memory_id,
@@ -2007,7 +2017,7 @@ async def update_memory(memory_id: str, memory: str, topics: Optional[List[str]]
                         )
                     )
                 else:
-                    db.upsert_user_memory(
+                    db.upsert_user_memory(  # type: ignore[unused-coroutine]
                         UserMemory(
                             memory_id=memory_id,
                             memory=memory,
@@ -2029,10 +2039,10 @@ async def delete_memory(memory_id: str) -> str:
                 str: A message indicating if the memory was deleted successfully or not.
             """
             try:
-                if isinstance(db, MemoryService):
+                if _is_memory_service(db):
                     await db.delete_user_memory(memory_id=memory_id, user_id=user_id)
                 else:
-                    db.delete_user_memory(memory_id=memory_id, user_id=user_id)
+                    db.delete_user_memory(memory_id=memory_id, user_id=user_id)  # type: ignore[unused-coroutine]
                 logger.debug("Memory deleted")
                 return "Memory deleted successfully"
             except Exception as e:
@@ -2045,10 +2055,10 @@ async def clear_memory() -> str:
             Returns:
                 str: A message indicating if the memory was cleared successfully or not.
             """
-            if isinstance(db, MemoryService):
+            if _is_memory_service(db):
                 await db.clear_memories()
             else:
-                db.clear_memories()
+                db.clear_memories()  # type: ignore[unused-coroutine]
             logger.debug("Memory cleared")
             return "Memory cleared successfully"
diff --git a/backend/app/core/agent/middleware/logging.py b/backend/app/core/agent/middleware/logging.py
index 753d7cb1e..9180ad4f5 100644
--- a/backend/app/core/agent/middleware/logging.py
+++ b/backend/app/core/agent/middleware/logging.py
@@ -6,10 +6,7 @@
 import traceback
 from collections.abc import Awaitable, Callable
 from datetime import datetime
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
-
-if TYPE_CHECKING:
-    pass
+from typing import Any, Dict, List, Optional
 
 from deepagents.backends.protocol import BackendProtocol
 from langchain.agents.middleware.types import AgentMiddleware, AgentState, ModelRequest, ModelResponse
@@ -632,16 +629,5 @@ def get_error_summary(self) -> Dict[str, Any]:
         except Exception:
             return {"error": "Failed to generate error summary"}
 
-    def cleanup_old_logs(self, days_to_keep: int = 30) -> None:
-        """Clean up old log files."""
-        time.time() - (days_to_keep * 24 * 60 * 60)
-
-        try:
-            # concrete cleanup logic needs to be implemented here;
-            # limited by BackendProtocol, this is a placeholder
-            pass
-        except Exception as e:
-            logger.warning(f"Failed to cleanup old logs: {e}")
-
 
 # (no additional imports needed)
diff --git a/backend/app/core/agent/middleware/memory_iteration_with_db.py b/backend/app/core/agent/middleware/memory_iteration_with_db.py
index b245c76c1..8379e47af 100644
--- a/backend/app/core/agent/middleware/memory_iteration_with_db.py
+++ b/backend/app/core/agent/middleware/memory_iteration_with_db.py
@@ -10,7 +10,7 @@
 
 import asyncio
 from collections.abc import Awaitable, Callable
-from typing import TYPE_CHECKING, Any, List, Literal, Optional
+from typing import Any, List, Literal, Optional
 
 from langchain.agents.middleware.types import AgentMiddleware, AgentState, ModelRequest, ModelResponse
 from langchain_core.messages import AIMessage, HumanMessage
@@ -20,9 +20,6 @@
 from app.core.agent.memory.manager import MemoryManager
 from app.schemas.memory import UserMemory
 
-if TYPE_CHECKING:
-    pass
-
 
 class AgenticMemoryState(AgentState):
     """Extended state for the Agentic Memory middleware."""
diff --git a/backend/app/core/agent/node_tools.py b/backend/app/core/agent/node_tools.py
index 563988904..3410e81c6 100644
--- a/backend/app/core/agent/node_tools.py
+++ b/backend/app/core/agent/node_tools.py
@@ -1,13 +1,15 @@
 """
 Node tools resolution.
 
-Parses `GraphNode` tool configuration (persisted in DB) and resolves it into a
-LangChain-compatible tools list for `create_agent(..., tools=[...])`.
+Parses node tool configuration and resolves it into a LangChain-compatible
+tools list for ``create_agent(..., tools=[...])``.
 
 Frontend stores tools under:
   node.data.config.tools = { builtin: string[], mcp: string[] }
-  where mcp entries are in format `${server_name}::${toolName}`.
-Backend also has a dedicated `GraphNode.tools` JSONB field; we support both.
+  where mcp entries are in format ``${server_name}::${toolName}``.
+
+Node objects are duck-typed: any object with ``id``, ``data``, and optional
+``tools`` attributes is accepted.
 """
 
 from __future__ import annotations
@@ -23,7 +25,6 @@
 # Import default user ID constant
 from app.core.constants import DEFAULT_USER_ID
 from app.core.tools.tool import EnhancedTool, ToolMetadata, ToolSourceType
-from app.models.graph import GraphNode
 from app.utils.sandbox_paths import get_user_sandbox_host_dir
 
 
@@ -34,9 +35,9 @@ def _first_dict(*candidates: Any) -> Optional[dict]:
     return None
 
 
-def extract_tools_config(node: GraphNode) -> Optional[dict]:
+def extract_tools_config(node: Any) -> Optional[dict]:
     """
-    Extract tools config dict from a GraphNode.
+    Extract tools config dict from a node data object.
 
     Preference order:
       1) node.data.config.tools (canonical)
@@ -381,22 +382,20 @@ def _normalize_user_id(user_id: Any | None) -> str:
     return str(user_id)
 
 
-async def resolve_tools_for_node(
-    node: GraphNode, *, user_id: str | None = None, backend: Any = None
-) -> Optional[List[Any]]:
+async def resolve_tools_for_node(node: Any, *, user_id: str | None = None, backend: Any = None) -> Optional[List[Any]]:
     """
     Resolve tools list for a node.
 
     Process flow:
     1. Extract tools config from node
-    2. Parse builtin tools → resolve to tool objects
-    3. Parse MCP tools → resolve server names → get tools
+    2. Parse builtin tools -> resolve to tool objects
+    3. Parse MCP tools -> resolve server names -> get tools
     4. Return combined tool list
 
     MCP server identification: server name (unique per user)
 
    Args:
-        node: GraphNode to resolve tools for
+        node: Node data object with id/data attributes
         user_id: User ID (normalized to string UUID format)
 
     Returns:
diff --git a/backend/app/core/agent/sample_agent.py b/backend/app/core/agent/sample_agent.py
index 31412ff3e..c4f5b006b 100644
--- a/backend/app/core/agent/sample_agent.py
+++ b/backend/app/core/agent/sample_agent.py
@@ -16,10 +16,9 @@
 from langchain_openai import ChatOpenAI
 from pydantic import SecretStr
 
-from app.common.exceptions import ModelConfigError
+from app.common.app_errors import ModelConfigError
 from app.core.agent.backends.filesystem_sandbox import FilesystemSandboxBackend
 from app.core.agent.middleware import LoggingMiddleware
-from app.services.model_service import MODEL_NAME_REQUIRED, MODEL_NO_CREDENTIALS
 
 load_dotenv()
 
@@ -52,7 +51,7 @@ def get_default_model(
 
     if not llm_model:
         raise ModelConfigError(
-            MODEL_NAME_REQUIRED,
+            ModelConfigError.MODEL_NAME_REQUIRED,
             "Model name is required but was not specified.",
         )
 
@@ -60,7 +59,7 @@ def get_default_model(
 
     if not api_key_value:
         raise ModelConfigError(
-            MODEL_NO_CREDENTIALS,
+            ModelConfigError.MODEL_NO_CREDENTIALS,
             f'No valid API key provided for model "{model_name}".',
             params={"model": model_name},
         )
diff --git a/backend/app/core/ai_adapter.py b/backend/app/core/ai_adapter.py
index fc997a160..7468e66f5 100644
--- a/backend/app/core/ai_adapter.py
+++ b/backend/app/core/ai_adapter.py
@@ -64,7 +64,7 @@ def __init__(
         self.engine = engine
 
         # Create per-session directories using configured workspace root
-        workspace_root = Path(settings.WORKSPACE_ROOT)
+        workspace_root = Path(settings.workspace_root)
         self.session_dir = workspace_root / "sessions" / session_id
         self.session_dir.mkdir(parents=True, exist_ok=True)
diff --git a/backend/app/core/contracts/__init__.py b/backend/app/core/contracts/__init__.py
new file mode 100644
index 000000000..39ed65f12
--- /dev/null
+++ b/backend/app/core/contracts/__init__.py
@@ -0,0 +1,83 @@
+"""Canonical backend contract values."""
+
+from app.core.contracts.agent import (
+    ALL_ENGINE_KINDS,
+    CLI_ENGINE_KINDS,
+    ENGINE_KINDS,
+    ENGINE_RUNTIME_MAP,
+    INTERNAL_ENGINE_KINDS,
+    RUNTIME_KINDS,
+    AllEngineKind,
+    EngineKind,
+    InternalEngineKind,
+    RuntimeKind,
+    infer_runtime_kind,
+    is_cli_engine_kind,
+    normalize_engine_kind,
+    normalize_runtime_kind,
+)
+from app.core.contracts.error import (
+    ERROR_CODES,
+    ERROR_SOURCES,
+    USER_ACTIONS,
+    ErrorCode,
+    ErrorSource,
+    ErrorSourceLiteral,
+    UserAction,
+    UserActionLiteral,
+)
+from app.core.contracts.execution import (
+    ACTIVE_EXECUTION_STATUSES,
+    ACTIVE_RUN_STATUSES,
+    EXECUTION_STATUSES,
+    RELEASE_STATUSES,
+    RUN_PURPOSES,
+    RUN_STATUSES,
+    TERMINAL_EXECUTION_STATUSES,
+    TERMINAL_RUN_STATUSES,
+    TRIGGER_MEDIUMS,
+    ExecutionStatusLiteral,
+    ReleaseStatusLiteral,
+    RunPurposeLiteral,
+    RunStatusLiteral,
+    TriggerMediumLiteral,
+)
+
+__all__ = [
+    "ACTIVE_EXECUTION_STATUSES",
+    "ACTIVE_RUN_STATUSES",
+    "ALL_ENGINE_KINDS",
+    "CLI_ENGINE_KINDS",
+    "ENGINE_KINDS",
+    "ENGINE_RUNTIME_MAP",
+    "ERROR_CODES",
+    "ERROR_SOURCES",
+    "EXECUTION_STATUSES",
+    "AllEngineKind",
+    "EngineKind",
+    "ErrorCode",
+    "ErrorSource",
+    "ErrorSourceLiteral",
+    "ExecutionStatusLiteral",
+    "INTERNAL_ENGINE_KINDS",
+    "InternalEngineKind",
+    "RELEASE_STATUSES",
+    "RUN_STATUSES",
+    "RUNTIME_KINDS",
+    "ReleaseStatusLiteral",
+    "RunStatusLiteral",
+    "RuntimeKind",
+    "TERMINAL_EXECUTION_STATUSES",
+    "TERMINAL_RUN_STATUSES",
+    "TRIGGER_MEDIUMS",
+    "RUN_PURPOSES",
+    "TriggerMediumLiteral",
+    "RunPurposeLiteral",
+    "USER_ACTIONS",
+    "UserAction",
+    "UserActionLiteral",
+    "infer_runtime_kind",
+    "is_cli_engine_kind",
+    "normalize_engine_kind",
+    "normalize_runtime_kind",
+]
diff --git a/backend/app/core/contracts/agent.py b/backend/app/core/contracts/agent.py
new file mode 100644
index 000000000..cdcb96c4d
--- /dev/null
+++ b/backend/app/core/contracts/agent.py
@@ -0,0 +1,58 @@
+"""Canonical Agent engine/runtime kind contract values."""
+
+from __future__ import annotations
+
+from typing import Literal, Union, get_args
+
+from app.common.app_errors import InvalidRequestError
+
+EngineKind = Literal[
+    "langgraph_visual",
+    "langgraph_code",
+    "claude_code",
+    "codex",
+    "openclaw",
+]
+ENGINE_KINDS: set[str] = set(get_args(EngineKind))
+
+InternalEngineKind = Literal["build_copilot"]
+INTERNAL_ENGINE_KINDS: set[str] = set(get_args(InternalEngineKind))
+
+AllEngineKind = Union[EngineKind, InternalEngineKind]
+ALL_ENGINE_KINDS: set[str] = ENGINE_KINDS | INTERNAL_ENGINE_KINDS
+
+RuntimeKind = Literal["sandbox", "server"]
+RUNTIME_KINDS: set[str] = set(get_args(RuntimeKind))
+
+ENGINE_RUNTIME_MAP: dict[str, str] = {
+    "langgraph_visual": "server",
+    "langgraph_code": "server",
+    "claude_code": "sandbox",
+    "codex": "sandbox",
+    "openclaw": "sandbox",
+}
+
+CLI_ENGINE_KINDS: set[str] = {k for k, v in ENGINE_RUNTIME_MAP.items() if v == "sandbox"}
+
+
+def infer_runtime_kind(engine_kind: str) -> str:
+    runtime_kind = ENGINE_RUNTIME_MAP.get(engine_kind)
+    if not runtime_kind:
+        raise InvalidRequestError(
+            f"Unsupported engine_kind={engine_kind}",
+            code="AGENT_ENGINE_KIND_UNSUPPORTED",
+            data={"engine_kind": engine_kind},
+        )
+    return runtime_kind
+
+
+def is_cli_engine_kind(engine_kind: str) -> bool:
+    return engine_kind in CLI_ENGINE_KINDS
+
+
+def normalize_engine_kind(engine_kind: str | None) -> str | None:
+    return engine_kind if engine_kind in ENGINE_KINDS else None
+
+
+def normalize_runtime_kind(runtime_kind: str | None) -> str | None:
+    return runtime_kind if runtime_kind in RUNTIME_KINDS else None
000000000..bc9d01de2 --- /dev/null +++ b/backend/app/core/contracts/error.py @@ -0,0 +1,416 @@ +"""Canonical error contract values — source of truth for error metadata. + +Mirrors the pattern of agent.py and execution.py: StrEnum for type-safe +values, Literal types for function signatures, plain sets for runtime checks. +""" + +from __future__ import annotations + +from enum import StrEnum +from typing import Literal + + +# --------------------------------------------------------------------------- +# Error source — classifies where the failure originated +# --------------------------------------------------------------------------- +class ErrorSource(StrEnum): + API = "api" + ENGINE = "engine" + RUNTIME = "runtime" + NODE = "node" + TOOL = "tool" + WEBSOCKET = "websocket" + AUTH = "auth" + VALIDATION = "validation" + PERMISSION = "permission" + INTERNAL = "internal" + + +ErrorSourceLiteral = Literal[ + "api", + "engine", + "runtime", + "node", + "tool", + "websocket", + "auth", + "validation", + "permission", + "internal", +] + +ERROR_SOURCES: set[str] = {s.value for s in ErrorSource} + + +# --------------------------------------------------------------------------- +# User action — what the user should do to resolve the failure +# --------------------------------------------------------------------------- +class UserAction(StrEnum): + RETRY = "retry" + CONFIGURE_MODEL = "configure_model" + RELOGIN = "relogin" + FIX_INPUT = "fix_input" + CONTACT_SUPPORT = "contact_support" + + +UserActionLiteral = Literal[ + "retry", + "configure_model", + "relogin", + "fix_input", + "contact_support", +] + +USER_ACTIONS: set[str] = {a.value for a in UserAction} + + +# --------------------------------------------------------------------------- +# Error codes — the canonical registry of all product error codes. +# +# Every error code used anywhere in backend must have an entry here. +# StrEnum values compare equal to their string values, so existing call +# sites using raw strings remain compatible during gradual migration. 
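+#
+# Illustrative (standard StrEnum behaviour, not project-specific):
+#
+#     ErrorCode.NOT_FOUND == "NOT_FOUND"   # True — members are str subclasses
+#     "NOT_FOUND" in ERROR_CODES           # True — runtime membership check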
+# --------------------------------------------------------------------------- +class ErrorCode(StrEnum): + # -- Generic -- + NOT_FOUND = "NOT_FOUND" + BAD_REQUEST = "BAD_REQUEST" + UNAUTHORIZED = "UNAUTHORIZED" + FORBIDDEN = "FORBIDDEN" + CONFLICT = "CONFLICT" + RATE_LIMITED = "RATE_LIMITED" + INTERNAL_ERROR = "INTERNAL_ERROR" + SERVICE_UNAVAILABLE = "SERVICE_UNAVAILABLE" + CLIENT_CLOSED = "CLIENT_CLOSED" + REQUEST_VALIDATION_ERROR = "REQUEST_VALIDATION_ERROR" + SUPERUSER_REQUIRED = "SUPERUSER_REQUIRED" + + # -- Auth -- + AUTH_REQUIRED = "AUTH_REQUIRED" + BEARER_TOKEN_MISSING = "BEARER_TOKEN_MISSING" + TOKEN_INVALID = "TOKEN_INVALID" + TOKEN_REFRESH_FAILED = "TOKEN_REFRESH_FAILED" + TOKEN_REFRESH_UNAVAILABLE = "TOKEN_REFRESH_UNAVAILABLE" + TOKEN_RESOURCE_ID_INVALID = "TOKEN_RESOURCE_ID_INVALID" + REFRESH_TOKEN_INVALID = "REFRESH_TOKEN_INVALID" + RESET_TOKEN_INVALID = "RESET_TOKEN_INVALID" + RESET_TOKEN_EXPIRED = "RESET_TOKEN_EXPIRED" + VERIFICATION_TOKEN_INVALID = "VERIFICATION_TOKEN_INVALID" + VERIFICATION_TOKEN_EXPIRED = "VERIFICATION_TOKEN_EXPIRED" + CREDENTIALS_INVALID = "CREDENTIALS_INVALID" + INVALID_CREDENTIALS = "INVALID_CREDENTIALS" + MISSING_CREDENTIALS = "MISSING_CREDENTIALS" + EMAIL_NOT_VERIFIED = "EMAIL_NOT_VERIFIED" + EMAIL_ALREADY_VERIFIED = "EMAIL_ALREADY_VERIFIED" + + # -- User -- + USER_NOT_FOUND = "USER_NOT_FOUND" + USER_ALREADY_EXISTS = "USER_ALREADY_EXISTS" + USER_INACTIVE = "USER_INACTIVE" + USER_INVALID = "USER_INVALID" + + # -- Agent -- + AGENT_NOT_FOUND = "AGENT_NOT_FOUND" + AGENT_NOT_PUBLISHED = "AGENT_NOT_PUBLISHED" + AGENT_ACTIVE_RELEASE_MISSING = "AGENT_ACTIVE_RELEASE_MISSING" + AGENT_DRAFT_VERSION_MISSING = "AGENT_DRAFT_VERSION_MISSING" + AGENT_DRAFT_VERSION_NOT_FOUND = "AGENT_DRAFT_VERSION_NOT_FOUND" + AGENT_WORKSPACE_MISMATCH = "AGENT_WORKSPACE_MISMATCH" + AGENT_DEFINITION_KIND_UNSUPPORTED = "AGENT_DEFINITION_KIND_UNSUPPORTED" + AGENT_DELETE_TASK_REFERENCE_CONFLICT = "AGENT_DELETE_TASK_REFERENCE_CONFLICT" + AGENT_VERSION_NOT_FOUND = "AGENT_VERSION_NOT_FOUND" + AGENT_VERSION_AGENT_MISMATCH = "AGENT_VERSION_AGENT_MISMATCH" + AGENT_VERSION_NOT_FROZEN = "AGENT_VERSION_NOT_FROZEN" + AGENT_RELEASE_NOT_FOUND = "AGENT_RELEASE_NOT_FOUND" + AGENT_RELEASE_NOT_ACTIVATABLE = "AGENT_RELEASE_NOT_ACTIVATABLE" + AGENT_RELEASE_ACTIVE_CANNOT_RETIRE = "AGENT_RELEASE_ACTIVE_CANNOT_RETIRE" + AGENT_RELEASE_AGENT_MISMATCH = "AGENT_RELEASE_AGENT_MISMATCH" + AGENT_RUN_NOT_FOUND = "AGENT_RUN_NOT_FOUND" + AGENT_RUN_FILTER_REQUIRED = "AGENT_RUN_FILTER_REQUIRED" + AGENT_RUN_WORKSPACE_REQUIRED = "AGENT_RUN_WORKSPACE_REQUIRED" + + # -- Run -- + RUN_NOT_FOUND = "RUN_NOT_FOUND" + RUN_BINDING_INVALID = "RUN_BINDING_INVALID" + RUN_CANCEL_STATUS_INVALID = "RUN_CANCEL_STATUS_INVALID" + RUN_RETRY_STATUS_INVALID = "RUN_RETRY_STATUS_INVALID" + RUN_RETRY_DRAFT_FORBIDDEN = "RUN_RETRY_DRAFT_FORBIDDEN" + RETRY_STATE_INVALID = "RETRY_STATE_INVALID" + + # -- Execution -- + EXECUTION_NOT_FOUND = "EXECUTION_NOT_FOUND" + EXECUTION_SNAPSHOT_NOT_FOUND = "EXECUTION_SNAPSHOT_NOT_FOUND" + EXECUTION_ENGINE_FAILED = "EXECUTION_ENGINE_FAILED" + EXECUTION_ENGINE_NOT_REGISTERED = "EXECUTION_ENGINE_NOT_REGISTERED" + EXECUTION_EVENT_CONTEXT_MISSING = "EXECUTION_EVENT_CONTEXT_MISSING" + EXECUTION_FAILED = "EXECUTION_FAILED" + EXECUTION_MESSAGE_UNSUPPORTED = "EXECUTION_MESSAGE_UNSUPPORTED" + EXECUTION_MESSAGE_REJECTED = "EXECUTION_MESSAGE_REJECTED" + EXECUTION_OPERATION_UNSUPPORTED = "EXECUTION_OPERATION_UNSUPPORTED" + EXECUTION_RUNTIME_KIND_MISSING = "EXECUTION_RUNTIME_KIND_MISSING" + STALE_REAPED = "STALE_REAPED" + 
DRAFT_OVERRIDE_PARAMETERS_INVALID = "DRAFT_OVERRIDE_PARAMETERS_INVALID" + + # -- Graph engine -- + GRAPH_DEFINITION_KIND_UNSUPPORTED = "GRAPH_DEFINITION_KIND_UNSUPPORTED" + GRAPH_DEFINITION_NODES_EMPTY = "GRAPH_DEFINITION_NODES_EMPTY" + GRAPH_EXECUTION_FAILED = "GRAPH_EXECUTION_FAILED" + GRAPH_EXECUTION_NOT_RUNNING = "GRAPH_EXECUTION_NOT_RUNNING" + GRAPH_EXECUTION_MESSAGE_UNSUPPORTED = "GRAPH_EXECUTION_MESSAGE_UNSUPPORTED" + + # -- Code engine -- + CODE_DEFINITION_EMPTY = "CODE_DEFINITION_EMPTY" + CODE_DEFINITION_KIND_UNSUPPORTED = "CODE_DEFINITION_KIND_UNSUPPORTED" + CODE_EXECUTION_FAILED = "CODE_EXECUTION_FAILED" + CODE_EXECUTION_INVALID = "CODE_EXECUTION_INVALID" + CODE_EXECUTION_NOT_RUNNING = "CODE_EXECUTION_NOT_RUNNING" + CODE_EXECUTION_MESSAGE_UNSUPPORTED = "CODE_EXECUTION_MESSAGE_UNSUPPORTED" + CODEX_THREAD_START_INVALID = "CODEX_THREAD_START_INVALID" + + # -- CLI engine -- + CLI_EXECUTION_RUNNER_FAILED = "CLI_EXECUTION_RUNNER_FAILED" + CLAUDE_CODE_RUNTIME_ERROR = "CLAUDE_CODE_RUNTIME_ERROR" + CODEX_RUNTIME_ERROR = "CODEX_RUNTIME_ERROR" + RUNTIME_PROVIDER_NOT_FOUND = "RUNTIME_PROVIDER_NOT_FOUND" + + # -- Copilot -- + AGENT_ERROR = "AGENT_ERROR" + CREDENTIAL_ERROR = "CREDENTIAL_ERROR" + CANCELLED = "CANCELLED" + COPILOT_EXECUTION_FAILED = "COPILOT_EXECUTION_FAILED" + COPILOT_EXECUTION_MESSAGE_UNSUPPORTED = "COPILOT_EXECUTION_MESSAGE_UNSUPPORTED" + UNKNOWN_ERROR = "UNKNOWN_ERROR" + + # -- Event bus -- + EVENT_ERROR_PAYLOAD_MISSING = "EVENT_ERROR_PAYLOAD_MISSING" + EVENT_SUBSCRIBER_DB_SESSION_MISSING = "EVENT_SUBSCRIBER_DB_SESSION_MISSING" + EVENT_TARGET_STATUS_MISSING = "EVENT_TARGET_STATUS_MISSING" + EVENT_TERMINAL_STATUS_MISSING = "EVENT_TERMINAL_STATUS_MISSING" + EVENT_TERMINAL_STATUS_INVALID = "EVENT_TERMINAL_STATUS_INVALID" + + # -- State machine -- + STATE_TRANSITION_INVALID = "STATE_TRANSITION_INVALID" + + # -- Model -- + MODEL_NOT_FOUND = "MODEL_NOT_FOUND" + MODEL_NO_CREDENTIALS = "MODEL_NO_CREDENTIALS" + MODEL_NAME_REQUIRED = "MODEL_NAME_REQUIRED" + MODEL_TYPE_UNSUPPORTED = "MODEL_TYPE_UNSUPPORTED" + MODEL_STREAM_ERROR = "MODEL_STREAM_ERROR" + MODEL_INSTANCE_NOT_FOUND = "MODEL_INSTANCE_NOT_FOUND" + MODEL_INSTANCE_CREATE_FAILED = "MODEL_INSTANCE_CREATE_FAILED" + MODEL_PROVIDER_NOT_FOUND = "MODEL_PROVIDER_NOT_FOUND" + MODEL_PROVIDER_OR_NAME_REQUIRED = "MODEL_PROVIDER_OR_NAME_REQUIRED" + MODEL_PROVIDER_DELETE_FORBIDDEN = "MODEL_PROVIDER_DELETE_FORBIDDEN" + MODEL_PROVIDER_BUILTIN_DELETE_FORBIDDEN = "MODEL_PROVIDER_BUILTIN_DELETE_FORBIDDEN" + MODEL_CREDENTIAL_NOT_FOUND = "MODEL_CREDENTIAL_NOT_FOUND" + MODEL_CREDENTIAL_CUSTOM_DELETE_FORBIDDEN = "MODEL_CREDENTIAL_CUSTOM_DELETE_FORBIDDEN" + BUILD_COPILOT_MODEL_REQUIRED = "BUILD_COPILOT_MODEL_REQUIRED" + + # -- Sandbox -- + SANDBOX_NOT_FOUND = "SANDBOX_NOT_FOUND" + SANDBOX_NOT_RUNNING = "SANDBOX_NOT_RUNNING" + SANDBOX_ACCESS_DENIED = "SANDBOX_ACCESS_DENIED" + SANDBOX_SETUP_FAILED = "SANDBOX_SETUP_FAILED" + SANDBOX_STOP_TARGET_NOT_FOUND = "SANDBOX_STOP_TARGET_NOT_FOUND" + SANDBOX_IMAGE_EMPTY = "SANDBOX_IMAGE_EMPTY" + SANDBOX_IMAGE_TOO_LONG = "SANDBOX_IMAGE_TOO_LONG" + SANDBOX_PROVIDER_UNKNOWN = "SANDBOX_PROVIDER_UNKNOWN" + DOCKER_DAEMON_UNAVAILABLE = "DOCKER_DAEMON_UNAVAILABLE" + DOCKER_COMMAND_FAILED = "DOCKER_COMMAND_FAILED" + DOCKER_EXECUTOR_DEPENDENCY_MISSING = "DOCKER_EXECUTOR_DEPENDENCY_MISSING" + DOCKER_EXECUTOR_INIT_FAILED = "DOCKER_EXECUTOR_INIT_FAILED" + DOCKER_SANDBOX_CREATE_FAILED = "DOCKER_SANDBOX_CREATE_FAILED" + DOCKER_SANDBOX_START_FAILED = "DOCKER_SANDBOX_START_FAILED" + MODAL_SANDBOX_STARTUP_FAILED = 
"MODAL_SANDBOX_STARTUP_FAILED" + MODAL_SANDBOX_STARTUP_TIMEOUT = "MODAL_SANDBOX_STARTUP_TIMEOUT" + RUNLOOP_API_KEY_MISSING = "RUNLOOP_API_KEY_MISSING" + RUNLOOP_DEVBOX_STARTUP_TIMEOUT = "RUNLOOP_DEVBOX_STARTUP_TIMEOUT" + CHECKPOINTER_DISABLED = "CHECKPOINTER_DISABLED" + CHECKPOINTER_POOL_UNINITIALIZED = "CHECKPOINTER_POOL_UNINITIALIZED" + CHECKPOINTER_MANAGER_UNINITIALIZED = "CHECKPOINTER_MANAGER_UNINITIALIZED" + DEEPAGENTS_UNAVAILABLE = "DEEPAGENTS_UNAVAILABLE" + + # -- Skill -- + SKILL_NOT_FOUND = "SKILL_NOT_FOUND" + SKILL_ACCESS_DENIED = "SKILL_ACCESS_DENIED" + SKILL_DELETE_FORBIDDEN = "SKILL_DELETE_FORBIDDEN" + SKILL_NAME_ALREADY_EXISTS = "SKILL_NAME_ALREADY_EXISTS" + SKILL_NAME_INVALID = "SKILL_NAME_INVALID" + SKILL_OWNER_COLLABORATOR_FORBIDDEN = "SKILL_OWNER_COLLABORATOR_FORBIDDEN" + SKILL_OWNER_TRANSFER_FORBIDDEN = "SKILL_OWNER_TRANSFER_FORBIDDEN" + SKILL_TOKEN_SCOPE_FORBIDDEN = "SKILL_TOKEN_SCOPE_FORBIDDEN" + SKILL_COLLABORATOR_NOT_FOUND = "SKILL_COLLABORATOR_NOT_FOUND" + SKILL_COLLABORATOR_ALREADY_EXISTS = "SKILL_COLLABORATOR_ALREADY_EXISTS" + SKILL_VERSION_NOT_FOUND = "SKILL_VERSION_NOT_FOUND" + SKILL_VERSION_FORMAT_INVALID = "SKILL_VERSION_FORMAT_INVALID" + SKILL_VERSION_NOT_GREATER_THAN_LATEST = "SKILL_VERSION_NOT_GREATER_THAN_LATEST" + SKILL_VERSION_PRERELEASE_UNSUPPORTED = "SKILL_VERSION_PRERELEASE_UNSUPPORTED" + SKILL_IMPORT_FILES_INVALID = "SKILL_IMPORT_FILES_INVALID" + SKILL_FILE_NOT_FOUND = "SKILL_FILE_NOT_FOUND" + SKILL_FILE_CONTENT_INVALID = "SKILL_FILE_CONTENT_INVALID" + SKILL_SYSTEM_FILE_IMPORT_FORBIDDEN = "SKILL_SYSTEM_FILE_IMPORT_FORBIDDEN" + SKILL_LOAD_FAILED = "SKILL_LOAD_FAILED" + SKILL_FILE_WRITE_FAILED = "SKILL_FILE_WRITE_FAILED" + + # -- Tool / MCP -- + TOOL_NOT_FOUND = "TOOL_NOT_FOUND" + TOOL_EXECUTION_FAILED = "TOOL_EXECUTION_FAILED" + CUSTOM_TOOL_NOT_FOUND = "CUSTOM_TOOL_NOT_FOUND" + CUSTOM_TOOL_NAME_ALREADY_EXISTS = "CUSTOM_TOOL_NAME_ALREADY_EXISTS" + CUSTOM_TOOL_QUOTA_EXCEEDED = "CUSTOM_TOOL_QUOTA_EXCEEDED" + CUSTOM_TOOL_VIEW_FORBIDDEN = "CUSTOM_TOOL_VIEW_FORBIDDEN" + CUSTOM_TOOL_UPDATE_FORBIDDEN = "CUSTOM_TOOL_UPDATE_FORBIDDEN" + CUSTOM_TOOL_DELETE_FORBIDDEN = "CUSTOM_TOOL_DELETE_FORBIDDEN" + MCP_SERVER_NOT_FOUND = "MCP_SERVER_NOT_FOUND" + MCP_SERVER_NAME_ALREADY_EXISTS = "MCP_SERVER_NAME_ALREADY_EXISTS" + MCP_SERVER_URL_REQUIRED = "MCP_SERVER_URL_REQUIRED" + MCP_SERVER_IDENTIFIER_INVALID = "MCP_SERVER_IDENTIFIER_INVALID" + MCP_SERVER_ACCESS_DENIED = "MCP_SERVER_ACCESS_DENIED" + MCP_SERVER_CONNECTION_FAILED = "MCP_SERVER_CONNECTION_FAILED" + MCP_SERVER_DISABLED = "MCP_SERVER_DISABLED" + MCP_SERVER_REFRESH_DISABLED = "MCP_SERVER_REFRESH_DISABLED" + MCP_SERVER_RESOLVE_FAILED = "MCP_SERVER_RESOLVE_FAILED" + MCP_SERVER_TOGGLE_TARGET_MISSING = "MCP_SERVER_TOGGLE_TARGET_MISSING" + MCP_SERVER_UPDATE_TARGET_MISSING = "MCP_SERVER_UPDATE_TARGET_MISSING" + MCP_TOOL_NOT_FOUND = "MCP_TOOL_NOT_FOUND" + MCP_TOOL_EXECUTION_FAILED = "MCP_TOOL_EXECUTION_FAILED" + MCP_TOOLKIT_SESSION_MISSING = "MCP_TOOLKIT_SESSION_MISSING" + MCP_STARTUP_SYNC_FAILED = "MCP_STARTUP_SYNC_FAILED" + + # -- Task -- + TASK_NOT_FOUND = "TASK_NOT_FOUND" + TASK_AGENT_MISSING = "TASK_AGENT_MISSING" + TASK_PRIORITY_INVALID = "TASK_PRIORITY_INVALID" + TASK_STATUS_INVALID = "TASK_STATUS_INVALID" + TASK_STATUS_NOT_DISPATCHABLE = "TASK_STATUS_NOT_DISPATCHABLE" + TASK_STATUS_TRANSITION_INVALID = "TASK_STATUS_TRANSITION_INVALID" + TASK_RUN_ALREADY_ACTIVE = "TASK_RUN_ALREADY_ACTIVE" + TASK_ACTIVITY_NOT_FOUND = "TASK_ACTIVITY_NOT_FOUND" + + # -- Thread -- + THREAD_NOT_FOUND = "THREAD_NOT_FOUND" + 
THREAD_ACTIVE_RUN_EXISTS = "THREAD_ACTIVE_RUN_EXISTS" + + # -- File -- + FILE_NOT_FOUND = "FILE_NOT_FOUND" + FILE_EMPTY = "FILE_EMPTY" + FILE_TOO_LARGE = "FILE_TOO_LARGE" + FILE_TYPE_UNSUPPORTED = "FILE_TYPE_UNSUPPORTED" + FILE_CONTENT_TYPE_MISMATCH = "FILE_CONTENT_TYPE_MISMATCH" + FILE_UPLOAD_FAILED = "FILE_UPLOAD_FAILED" + FILE_WRITE_FAILED = "FILE_WRITE_FAILED" + FILE_READ_FAILED = "FILE_READ_FAILED" + FILE_DELETE_FAILED = "FILE_DELETE_FAILED" + FILE_LIST_FAILED = "FILE_LIST_FAILED" + FILE_CLEAR_FAILED = "FILE_CLEAR_FAILED" + + # -- Artifact -- + ARTIFACT_FILE_NOT_FOUND = "ARTIFACT_FILE_NOT_FOUND" + ARTIFACT_FILE_READ_FAILED = "ARTIFACT_FILE_READ_FAILED" + ARTIFACT_RUN_DELETE_FAILED = "ARTIFACT_RUN_DELETE_FAILED" + + # -- Memory -- + MEMORY_NOT_FOUND = "MEMORY_NOT_FOUND" + MEMORY_CREATE_FAILED = "MEMORY_CREATE_FAILED" + MEMORY_UPDATE_FAILED = "MEMORY_UPDATE_FAILED" + MEMORY_IDS_EMPTY = "MEMORY_IDS_EMPTY" + MEMORY_TABLE_TYPE_UNSUPPORTED = "MEMORY_TABLE_TYPE_UNSUPPORTED" + MEMORY_OPTIMIZATION_FAILED = "MEMORY_OPTIMIZATION_FAILED" + MEMORY_OPTIMIZATION_MODEL_REQUIRED = "MEMORY_OPTIMIZATION_MODEL_REQUIRED" + MEMORY_OPTIMIZATION_MODEL_INVALID = "MEMORY_OPTIMIZATION_MODEL_INVALID" + + # -- Trace -- + TRACE_NOT_FOUND = "TRACE_NOT_FOUND" + + # -- Workspace -- + WORKSPACE_NOT_FOUND = "WORKSPACE_NOT_FOUND" + WORKSPACE_ACCESS_DENIED = "WORKSPACE_ACCESS_DENIED" + WORKSPACE_PERMISSION_DENIED = "WORKSPACE_PERMISSION_DENIED" + WORKSPACE_TYPE_INVALID = "WORKSPACE_TYPE_INVALID" + WORKSPACE_MEMBER_NOT_FOUND = "WORKSPACE_MEMBER_NOT_FOUND" + WORKSPACE_MEMBER_ALREADY_EXISTS = "WORKSPACE_MEMBER_ALREADY_EXISTS" + WORKSPACE_MEMBER_ROLE_INVALID = "WORKSPACE_MEMBER_ROLE_INVALID" + WORKSPACE_MEMBER_ROLE_TOO_HIGH = "WORKSPACE_MEMBER_ROLE_TOO_HIGH" + WORKSPACE_OWNER_REMOVE_FORBIDDEN = "WORKSPACE_OWNER_REMOVE_FORBIDDEN" + WORKSPACE_OWNER_ROLE_CHANGE_FORBIDDEN = "WORKSPACE_OWNER_ROLE_CHANGE_FORBIDDEN" + WORKSPACE_OWNER_ROLE_ASSIGNMENT_FORBIDDEN = "WORKSPACE_OWNER_ROLE_ASSIGNMENT_FORBIDDEN" + WORKSPACE_LAST_ADMIN_REMOVE_FORBIDDEN = "WORKSPACE_LAST_ADMIN_REMOVE_FORBIDDEN" + PERSONAL_WORKSPACE_DELETE_FORBIDDEN = "PERSONAL_WORKSPACE_DELETE_FORBIDDEN" + PERSONAL_WORKSPACE_DUPLICATE_FORBIDDEN = "PERSONAL_WORKSPACE_DUPLICATE_FORBIDDEN" + + # -- Organization -- + ORGANIZATION_NOT_FOUND = "ORGANIZATION_NOT_FOUND" + ORGANIZATION_ACCESS_DENIED = "ORGANIZATION_ACCESS_DENIED" + ORGANIZATION_PERMISSION_DENIED = "ORGANIZATION_PERMISSION_DENIED" + ORGANIZATION_SLUG_ALREADY_EXISTS = "ORGANIZATION_SLUG_ALREADY_EXISTS" + ORGANIZATION_PLAN_UNSUPPORTED = "ORGANIZATION_PLAN_UNSUPPORTED" + ORGANIZATION_SEATS_INVALID = "ORGANIZATION_SEATS_INVALID" + ORGANIZATION_SEATS_BELOW_MEMBER_COUNT = "ORGANIZATION_SEATS_BELOW_MEMBER_COUNT" + ORGANIZATION_SEATS_UNAVAILABLE = "ORGANIZATION_SEATS_UNAVAILABLE" + ORGANIZATION_MEMBER_NOT_FOUND = "ORGANIZATION_MEMBER_NOT_FOUND" + ORGANIZATION_MEMBER_ALREADY_EXISTS = "ORGANIZATION_MEMBER_ALREADY_EXISTS" + ORGANIZATION_MEMBER_ROLE_INVALID = "ORGANIZATION_MEMBER_ROLE_INVALID" + ORGANIZATION_MEMBER_VIEW_FORBIDDEN = "ORGANIZATION_MEMBER_VIEW_FORBIDDEN" + ORGANIZATION_MEMBER_ROLE_UPDATE_FORBIDDEN = "ORGANIZATION_MEMBER_ROLE_UPDATE_FORBIDDEN" + ORGANIZATION_MEMBER_REMOVE_FORBIDDEN = "ORGANIZATION_MEMBER_REMOVE_FORBIDDEN" + ORGANIZATION_OWNER_INVITE_FORBIDDEN = "ORGANIZATION_OWNER_INVITE_FORBIDDEN" + ORGANIZATION_OWNER_REMOVE_FORBIDDEN = "ORGANIZATION_OWNER_REMOVE_FORBIDDEN" + ORGANIZATION_OWNER_ROLE_MODIFY_FORBIDDEN = "ORGANIZATION_OWNER_ROLE_MODIFY_FORBIDDEN" + ORGANIZATION_OWNER_ROLE_REASSIGN_FORBIDDEN = 
"ORGANIZATION_OWNER_ROLE_REASSIGN_FORBIDDEN" + ORGANIZATION_ADMIN_PROMOTE_FORBIDDEN = "ORGANIZATION_ADMIN_PROMOTE_FORBIDDEN" + ORGANIZATION_ADMIN_ROLE_TARGET_FORBIDDEN = "ORGANIZATION_ADMIN_ROLE_TARGET_FORBIDDEN" + ORGANIZATION_ADMIN_REMOVE_TARGET_FORBIDDEN = "ORGANIZATION_ADMIN_REMOVE_TARGET_FORBIDDEN" + + # -- OAuth -- + OAUTH_PROVIDER_NOT_FOUND = "OAUTH_PROVIDER_NOT_FOUND" + OAUTH_EMAIL_REQUIRED = "OAUTH_EMAIL_REQUIRED" + OAUTH_REGISTRATION_DISABLED = "OAUTH_REGISTRATION_DISABLED" + OAUTH_TOKEN_EXCHANGE_FAILED = "OAUTH_TOKEN_EXCHANGE_FAILED" + OAUTH_USERINFO_FETCH_FAILED = "OAUTH_USERINFO_FETCH_FAILED" + OAUTH_LAST_ACCOUNT_UNLINK_FORBIDDEN = "OAUTH_LAST_ACCOUNT_UNLINK_FORBIDDEN" + + # -- API token -- + API_TOKEN_INVALID = "API_TOKEN_INVALID" + API_TOKEN_REVOKED = "API_TOKEN_REVOKED" + API_TOKEN_EXPIRED = "API_TOKEN_EXPIRED" + API_TOKEN_OWNER_INACTIVE = "API_TOKEN_OWNER_INACTIVE" + + # -- Platform token -- + PLATFORM_TOKEN_NOT_FOUND = "PLATFORM_TOKEN_NOT_FOUND" + PLATFORM_TOKEN_LIMIT_EXCEEDED = "PLATFORM_TOKEN_LIMIT_EXCEEDED" + PLATFORM_TOKEN_SCOPES_INVALID = "PLATFORM_TOKEN_SCOPES_INVALID" + PLATFORM_TOKEN_REVOKE_FORBIDDEN = "PLATFORM_TOKEN_REVOKE_FORBIDDEN" + PLATFORM_TOKEN_RESOURCE_TYPE_INVALID = "PLATFORM_TOKEN_RESOURCE_TYPE_INVALID" + PLATFORM_TOKEN_RESOURCE_BINDING_INVALID = "PLATFORM_TOKEN_RESOURCE_BINDING_INVALID" + PLATFORM_TOKEN_WORKSPACE_ACCESS_DENIED = "PLATFORM_TOKEN_WORKSPACE_ACCESS_DENIED" + PLATFORM_TOKEN_TOOL_ACCESS_DENIED = "PLATFORM_TOKEN_TOOL_ACCESS_DENIED" + PLATFORM_TOKEN_SKILL_ACCESS_DENIED = "PLATFORM_TOKEN_SKILL_ACCESS_DENIED" + + # -- WebSocket -- + WEBSOCKET_INVALID_JSON = "WEBSOCKET_INVALID_JSON" + WEBSOCKET_UNKNOWN_FRAME_TYPE = "WEBSOCKET_UNKNOWN_FRAME_TYPE" + WEBSOCKET_EXECUTION_ID_REQUIRED = "WEBSOCKET_EXECUTION_ID_REQUIRED" + WEBSOCKET_INVALID_EXECUTION_ID = "WEBSOCKET_INVALID_EXECUTION_ID" + WEBSOCKET_INVALID_AFTER_SEQ = "WEBSOCKET_INVALID_AFTER_SEQ" + + # -- OpenClaw -- + OPENCLAW_INSTANCE_NOT_FOUND = "OPENCLAW_INSTANCE_NOT_FOUND" + OPENCLAW_INSTANCE_NOT_RUNNING = "OPENCLAW_INSTANCE_NOT_RUNNING" + OPENCLAW_INSTANCE_START_FAILED = "OPENCLAW_INSTANCE_START_FAILED" + OPENCLAW_INSTANCE_RESTART_FAILED = "OPENCLAW_INSTANCE_RESTART_FAILED" + OPENCLAW_CONTAINER_NOT_FOUND = "OPENCLAW_CONTAINER_NOT_FOUND" + OPENCLAW_CONTAINER_CREATE_FAILED = "OPENCLAW_CONTAINER_CREATE_FAILED" + OPENCLAW_CONTAINER_STARTUP_FAILED = "OPENCLAW_CONTAINER_STARTUP_FAILED" + OPENCLAW_GATEWAY_ENV_MISSING = "OPENCLAW_GATEWAY_ENV_MISSING" + OPENCLAW_GATEWAY_TIMEOUT = "OPENCLAW_GATEWAY_TIMEOUT" + OPENCLAW_PORT_POOL_EXHAUSTED = "OPENCLAW_PORT_POOL_EXHAUSTED" + OPENCLAW_SKILL_SYNC_FAILED = "OPENCLAW_SKILL_SYNC_FAILED" + OPENCLAW_STREAM_ERROR = "OPENCLAW_STREAM_ERROR" + OPENCLAW_DEVICE_LIST_FAILED = "OPENCLAW_DEVICE_LIST_FAILED" + OPENCLAW_DEVICE_COMMAND_FAILED = "OPENCLAW_DEVICE_COMMAND_FAILED" + OPENCLAW_DEVICE_APPROVE_FAILED = "OPENCLAW_DEVICE_APPROVE_FAILED" + OPENCLAW_DEVICE_APPROVE_ALL_FAILED = "OPENCLAW_DEVICE_APPROVE_ALL_FAILED" + + +ERROR_CODES: set[str] = {c.value for c in ErrorCode} diff --git a/backend/app/core/contracts/execution.py b/backend/app/core/contracts/execution.py new file mode 100644 index 000000000..fa611229f --- /dev/null +++ b/backend/app/core/contracts/execution.py @@ -0,0 +1,59 @@ +"""Canonical execution contract values.""" + +from __future__ import annotations + +from typing import Literal + +RunStatusLiteral = Literal["pending", "running", "succeeded", "failed", "cancelled"] +ExecutionStatusLiteral = Literal[ + "pending", + "dispatched", + "running", + 
"approval_wait", + "succeeded", + "failed", + "cancelled", +] +ReleaseStatusLiteral = Literal["ready", "active", "superseded", "failed", "retired"] +TriggerMediumLiteral = Literal[ + "api", + "scheduler", + "system", + "ui", +] +RunPurposeLiteral = Literal[ + "production", + "draft_test", + "debug", + "internal_builder", +] + +RUN_STATUSES: set[str] = {"pending", "running", "succeeded", "failed", "cancelled"} +ACTIVE_RUN_STATUSES: set[str] = {"pending", "running"} +TERMINAL_RUN_STATUSES: set[str] = {"succeeded", "failed", "cancelled"} + +EXECUTION_STATUSES: set[str] = { + "pending", + "dispatched", + "running", + "approval_wait", + "succeeded", + "failed", + "cancelled", +} +ACTIVE_EXECUTION_STATUSES: set[str] = {"pending", "dispatched", "running", "approval_wait"} +TERMINAL_EXECUTION_STATUSES: set[str] = {"succeeded", "failed", "cancelled"} + +RELEASE_STATUSES: set[str] = {"ready", "active", "superseded", "failed", "retired"} +TRIGGER_MEDIUMS: set[str] = { + "api", + "scheduler", + "system", + "ui", +} +RUN_PURPOSES: set[str] = { + "production", + "draft_test", + "debug", + "internal_builder", +} diff --git a/backend/app/core/copilot/action_applier.py b/backend/app/core/copilot/action_applier.py index a2d46718b..68da82f1c 100644 --- a/backend/app/core/copilot/action_applier.py +++ b/backend/app/core/copilot/action_applier.py @@ -107,7 +107,7 @@ def apply_actions_to_graph_state( actions: List of actions to apply (CREATE_NODE, CONNECT_NODES, etc.) Returns: - Tuple of (updated_nodes, updated_edges) in format ready for GraphService.save_graph_state + Tuple of (updated_nodes, updated_edges) ready for visual AgentVersion definition payloads """ # Clone current state to apply diffs processed_nodes: List[Dict[str, Any]] = [node.copy() for node in current_nodes] @@ -163,7 +163,7 @@ def apply_actions_to_graph_state( edge_exists = any(e.get("source") == source and e.get("target") == target for e in processed_edges) if not edge_exists: - # Create edge in format expected by GraphService.save_graph_state + # Create edge in the visual definition payload format. # Format matches frontend: { id, source, target, data: {} } new_edge: Dict[str, Any] = { "id": f"e-{source}-{target}", diff --git a/backend/app/core/copilot/exceptions.py b/backend/app/core/copilot/exceptions.py index 31025ed24..ee406dfe3 100644 --- a/backend/app/core/copilot/exceptions.py +++ b/backend/app/core/copilot/exceptions.py @@ -1,122 +1,70 @@ -""" -Copilot-specific exceptions for better error handling. +from typing import Any, Mapping -Provides specialized exception classes for Copilot operations, -enabling better error categorization and user-friendly error messages. 
-""" +from app.common.app_errors import AuthenticationError, InfraError, InternalServiceError, InvalidRequestError -from typing import Any -from fastapi import status +def _with_original_error(data: Mapping[str, Any] | None, original_error: Exception | None) -> dict[str, Any] | None: + payload = dict(data) if data else None + if original_error is None: + return payload + merged = payload or {} + merged["error_type"] = type(original_error).__name__ + merged["error_message"] = str(original_error) + return merged -from app.common.exceptions import AppException, BadRequestException - - -class CopilotException(AppException): - """Base exception for Copilot operations.""" - - def __init__( - self, - message: str = "Copilot operation failed", - *, - code: int | None = 5001, - data: Any = None, - status_code: int = status.HTTP_500_INTERNAL_SERVER_ERROR, - ): - super().__init__(status_code=status_code, message=message, code=code, data=data) - - -class CopilotLLMError(CopilotException): - """LLM-related errors (API failures, rate limits, etc.).""" +class CopilotLLMError(InfraError): def __init__( self, message: str = "LLM service error", *, - code: int | None = 5101, - data: Any = None, + code: str = "COPILOT_LLM_ERROR", + data: Mapping[str, Any] | None = None, original_error: Exception | None = None, - ): - if original_error: - message = f"{message}: {str(original_error)}" - if data is None: - data = {"error_type": type(original_error).__name__} - super().__init__( - status_code=status.HTTP_502_BAD_GATEWAY, - message=message, - code=code, - data=data, - ) - + ) -> None: + super().__init__(code=code, message=message, data=_with_original_error(data, original_error)) -class CopilotValidationError(BadRequestException): - """Action validation errors.""" +class CopilotValidationError(InvalidRequestError): def __init__( self, message: str = "Action validation failed", *, - code: int | None = 5102, - data: Any = None, - ): + code: str = "COPILOT_VALIDATION_ERROR", + data: Mapping[str, Any] | None = None, + ) -> None: super().__init__(message=message, code=code, data=data) -class CopilotSessionError(CopilotException): - """Session management errors (Redis unavailable, session not found, etc.).""" - +class CopilotSessionError(InfraError): def __init__( self, message: str = "Session management error", *, - code: int | None = 5103, - data: Any = None, - status_code: int = status.HTTP_503_SERVICE_UNAVAILABLE, - ): - super().__init__( - status_code=status_code, - message=message, - code=code, - data=data, - ) + code: str = "COPILOT_SESSION_ERROR", + data: Mapping[str, Any] | None = None, + ) -> None: + super().__init__(code=code, message=message, data=dict(data) if data else None) -class CopilotCredentialError(CopilotException): - """Credential-related errors (missing API key, invalid credentials, etc.).""" - +class CopilotCredentialError(AuthenticationError): def __init__( self, message: str = "Credential error", *, - code: int | None = 5105, - data: Any = None, - ): - super().__init__( - status_code=status.HTTP_401_UNAUTHORIZED, - message=message, - code=code, - data=data, - ) - + code: str = "COPILOT_CREDENTIAL_ERROR", + data: Mapping[str, Any] | None = None, + ) -> None: + super().__init__(message=message, code=code, data=data) -class CopilotAgentError(CopilotException): - """Agent execution errors (tool failures, recursion limits, etc.).""" +class CopilotAgentError(InternalServiceError): def __init__( self, message: str = "Agent execution error", *, - code: int | None = 5106, - data: Any = None, + code: str 
= "COPILOT_AGENT_ERROR", + data: Mapping[str, Any] | None = None, original_error: Exception | None = None, - ): - if original_error: - message = f"{message}: {str(original_error)}" - if data is None: - data = {"error_type": type(original_error).__name__} - super().__init__( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - message=message, - code=code, - data=data, - ) + ) -> None: + super().__init__(message=message, code=code, data=_with_original_error(data, original_error)) diff --git a/backend/app/core/copilot_deepagents/artifacts.py b/backend/app/core/copilot_deepagents/artifacts.py index c839a585f..9fb74320f 100644 --- a/backend/app/core/copilot_deepagents/artifacts.py +++ b/backend/app/core/copilot_deepagents/artifacts.py @@ -15,7 +15,6 @@ from __future__ import annotations import json -import os import re import uuid from dataclasses import dataclass, field @@ -24,6 +23,7 @@ from loguru import logger +from app.core.settings import settings from app.utils.datetime import utc_now @@ -32,7 +32,7 @@ def _default_artifacts_root() -> Path: def resolve_artifacts_root() -> Path: - env = os.getenv("DEEPAGENTS_ARTIFACTS_DIR", "").strip() + env = (settings.deepagents_artifacts_dir or "").strip() if env: return Path(env).expanduser() return _default_artifacts_root() diff --git a/backend/app/core/copilot_deepagents/manager.py b/backend/app/core/copilot_deepagents/manager.py index ab28e2450..e352b578e 100644 --- a/backend/app/core/copilot_deepagents/manager.py +++ b/backend/app/core/copilot_deepagents/manager.py @@ -9,7 +9,6 @@ from __future__ import annotations -import os import uuid from pathlib import Path from typing import TYPE_CHECKING, Any, List, Optional, Type @@ -21,7 +20,9 @@ from langchain_core.runnables import Runnable from loguru import logger +from app.common.app_errors import ServiceUnavailableError from app.core.copilot.tools import connect_nodes, create_node, delete_node, update_config +from app.core.settings import settings from .artifacts import ArtifactStore from .prompts import ( @@ -64,7 +65,7 @@ def get_artifacts_root() -> Path: """Return the artifacts root directory.""" - root = os.environ.get("DEEPAGENTS_ARTIFACTS_DIR", "") + root = settings.deepagents_artifacts_dir or "" if not root: root = str(Path.home() / ".agent-platform" / "deepagents") return Path(root) @@ -145,7 +146,11 @@ def create_copilot_manager( (manager_agent, artifact_store) """ if not DEEPAGENTS_AVAILABLE or create_deep_agent is None or FilesystemBackend is None: - raise RuntimeError("deepagents library not available. 
Install with: pip install deepagents") + raise ServiceUnavailableError( + "deepagents library is not available", + code="DEEPAGENTS_UNAVAILABLE", + data={"install_hint": "pip install deepagents"}, + ) assert create_deep_agent is not None and FilesystemBackend is not None # narrow types for mypy # generate run_id @@ -193,10 +198,3 @@ def create_copilot_manager( logger.info(f"[DeepAgentsCopilot] Created manager run_id={run_id} run_dir={run_dir}") return manager, store - - -# ==================== Schema Validation Helpers (Moved to .utils) ==================== - -# Re-exporting from .utils if needed, but better to import directly from .utils - -# ==================== Helpers (Moved to .utils) ==================== diff --git a/backend/app/core/copilot_deepagents/runner.py b/backend/app/core/copilot_deepagents/runner.py index ab98b09fb..f11d9e197 100644 --- a/backend/app/core/copilot_deepagents/runner.py +++ b/backend/app/core/copilot_deepagents/runner.py @@ -250,8 +250,11 @@ async def stream_copilot_manager( # Determine error code and potentially simplify message error_code = "AGENT_ERROR" + source = "runtime" + retryable = False if "api_key" in error_msg.lower() or "credential" in error_msg.lower(): error_code = "CREDENTIAL_ERROR" + source = "api" elif "RateLimitReached" in error_msg: # Try to extract a more readable message for rate limits import re @@ -262,9 +265,12 @@ async def stream_copilot_manager( error_msg = f"Rate limit reached. Please retry after {seconds} seconds." else: error_msg = "Rate limit reached. Please try again later." + retryable = True yield { "type": "error", "message": error_msg, "code": error_code, + "source": source, + "retryable": retryable, } diff --git a/backend/app/core/engine/__init__.py b/backend/app/core/engine/__init__.py new file mode 100644 index 000000000..c8817c3db --- /dev/null +++ b/backend/app/core/engine/__init__.py @@ -0,0 +1,30 @@ +""" +Engine package — unified execution engine abstraction. + +Registers all built-in engines at import time. +""" + +from app.core.engine.cli_engine import CLIEngine +from app.core.engine.code_engine import LangGraphCodeEngine +from app.core.engine.copilot_engine import CopilotEngine +from app.core.engine.graph_engine import LangGraphVisualEngine +from app.core.engine.protocol import EngineCapabilities, ExecutionContext, ExecutionEngine +from app.core.engine.registry import engine_registry + +engine_registry.register("langgraph_visual", LangGraphVisualEngine()) +engine_registry.register("langgraph_code", LangGraphCodeEngine()) +engine_registry.register("claude_code", CLIEngine("claude_code")) +engine_registry.register("codex", CLIEngine("codex")) +engine_registry.register("openclaw", CLIEngine("openclaw")) +engine_registry.register("build_copilot", CopilotEngine()) + +__all__ = [ + "CLIEngine", + "CopilotEngine", + "EngineCapabilities", + "ExecutionContext", + "ExecutionEngine", + "LangGraphCodeEngine", + "LangGraphVisualEngine", + "engine_registry", +] diff --git a/backend/app/core/engine/cli_engine.py b/backend/app/core/engine/cli_engine.py new file mode 100644 index 000000000..9c0997bd9 --- /dev/null +++ b/backend/app/core/engine/cli_engine.py @@ -0,0 +1,75 @@ +"""CLI Execution Engine — parameterized base for all CLI-based agent runtimes. + +Covers: claude_code, codex, openclaw. 
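+
+One instance is registered per CLI engine kind (see
+``app.core.engine.__init__``), e.g.::
+
+    engine_registry.register("codex", CLIEngine("codex"))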
+""" + +from __future__ import annotations + +import uuid +from typing import Any + +from loguru import logger + +from app.core.engine.protocol import EngineCapabilities, ExecutionContext +from app.core.events.event_types import ExecutionEventType + + +class CLIEngine: + """CLI execution engine, parameterized by engine_kind.""" + + capabilities = EngineCapabilities( + supports_cancel=True, + supports_message_injection=True, + supports_debug_observation=False, + supports_artifacts=True, + supports_approval=True, + ) + + def __init__(self, engine_kind: str) -> None: + self.engine_kind = engine_kind + + async def start( + self, + context: ExecutionContext, + *, + release_runtime_binding: dict[str, Any], + engine_kind: str, + definition_payload: dict[str, Any], + prompt: str, + ) -> None: + from app.core.database import AsyncSessionLocal + + execution_id = context.execution_id + logger.info(f"[CLIEngine:{self.engine_kind}] Starting execution {execution_id}") + + await context.update_status("running") + await context.emit( + ExecutionEventType.EXECUTION_STARTED, + {"engine": self.engine_kind}, + ) + + async with AsyncSessionLocal() as db: + from app.services.runner_factory import create_execution_runner + + runner = create_execution_runner(db) + await runner.run( + execution_id=execution_id, + prompt=prompt, + credentials=context.credentials or None, + collector=context.collector, + ) + + async def cancel(self, execution_id: uuid.UUID) -> None: + from app.core.agent.cli_backends.session_registry import session_registry + + session = session_registry.get(execution_id) + if session: + await session.cancel() + logger.info(f"[CLIEngine:{self.engine_kind}] Cancelled execution {execution_id}") + + async def send_message(self, execution_id: uuid.UUID, message: str) -> None: + from app.core.agent.cli_backends.session_registry import session_registry + + session = session_registry.get(execution_id) + if session: + await session.inject_message(message) diff --git a/backend/app/core/engine/code_engine.py b/backend/app/core/engine/code_engine.py new file mode 100644 index 000000000..ce6d466fe --- /dev/null +++ b/backend/app/core/engine/code_engine.py @@ -0,0 +1,188 @@ +""" +LangGraph Code Execution Engine — sandboxed user-code executor. + +engine_kind: "langgraph_code" +Extracts a StateGraph from user Python code via execute_code(), +compiles it, and executes it with streaming events. 
+""" + +from __future__ import annotations + +import asyncio +import uuid +from typing import Any + +from loguru import logger +from sqlalchemy import select + +from app.common.app_errors import InternalServiceError, InvalidRequestError, normalize_app_error +from app.core.engine.protocol import EngineCapabilities, ExecutionContext +from app.core.events.event_types import ExecutionEventType + + +class LangGraphCodeEngine: + """Sandboxed code executor engine.""" + + engine_kind = "langgraph_code" + capabilities = EngineCapabilities( + supports_cancel=True, + supports_message_injection=False, + supports_debug_observation=True, + supports_artifacts=False, + supports_approval=False, + ) + + def __init__(self) -> None: + self._running: dict[uuid.UUID, Any] = {} + + async def start( + self, + context: ExecutionContext, + *, + release_runtime_binding: dict[str, Any], + engine_kind: str, + definition_payload: dict[str, Any], + prompt: str, + ) -> None: + """Extract StateGraph from user code, compile, and execute.""" + + execution_id = context.execution_id + + if engine_kind != "langgraph_code": + error = InvalidRequestError( + f"LangGraphCodeEngine cannot handle engine_kind={engine_kind}", + code="LANGGRAPH_CODE_ENGINE_KIND_MISMATCH", + data={"engine_kind": engine_kind}, + ) + await context.complete("failed", error.message, error) + return + + code = definition_payload.get("code", "") + if not code or not code.strip(): + error = InvalidRequestError( + "No code provided in definition_payload", + code="CODE_DEFINITION_EMPTY", + ) + await context.complete("failed", error.message, error) + return + + logger.info(f"[LangGraphCodeEngine] Starting execution {execution_id} ({len(code)} chars of code)") + + cancel_event = asyncio.Event() + self._running[execution_id] = cancel_event + + # ------------------------------------------------------------------ + # Observation: create root span + callback handler if collector set + # ------------------------------------------------------------------ + root_span = None + obs_handler = None + if context.collector: + root_span = context.collector.start_agent(name="code_executor") + obs_handler = context.collector.create_langchain_handler() + + try: + await context.update_status("running") + await context.emit( + ExecutionEventType.EXECUTION_STARTED, + { + "engine": "langgraph_code", + "code_length": len(code), + }, + ) + + thread_id: str | None = None + try: + from app.models.agent_run import AgentRun + + run = ( + await context.db.execute(select(AgentRun).where(AgentRun.id == context.run_id)) + ).scalar_one_or_none() + if run: + thread_id = str(run.thread_id) if run.thread_id else None + except Exception as lookup_exc: + logger.warning(f"[LangGraphCodeEngine] Could not resolve user_id/thread_id: {lookup_exc}") + + from app.core.code_executor import execute_code + + await context.emit(ExecutionEventType.ASSISTANT_TEXT, {"content": "Compiling user code..."}) + state_graph = execute_code(code) + + await context.emit(ExecutionEventType.ASSISTANT_TEXT, {"content": "Graph extracted, compiling..."}) + compiled = state_graph.compile() + + stream_config: dict[str, Any] = { + "configurable": {"thread_id": thread_id or str(execution_id)}, + } + if obs_handler: + stream_config["callbacks"] = [obs_handler] + + result_text: str = "" + async for chunk in compiled.astream( + {"messages": [{"role": "user", "content": prompt}]}, # type: ignore[arg-type] + stream_config, # type: ignore[arg-type] + ): + if cancel_event.is_set(): + await context.complete("cancelled", "Execution cancelled 
by user") + return + + for node_output in chunk.values(): + messages = node_output.get("messages", []) if isinstance(node_output, dict) else [] + for msg in messages: + content = getattr(msg, "content", None) or ( + msg.get("content") if isinstance(msg, dict) else None + ) + if content: + result_text = str(content) + await context.emit(ExecutionEventType.ASSISTANT_TEXT, {"content": result_text}) + + await context.complete("succeeded", result_text[:2000] if result_text else None) + + except (ValueError, ImportError, TimeoutError) as exc: + logger.warning(f"[LangGraphCodeEngine] Code execution error {execution_id}: {exc}") + app_error = normalize_app_error( + exc, + default_code="CODE_EXECUTION_INVALID", + default_message="Code execution failed", + default_data={"execution_id": str(execution_id)}, + source="engine", + ) + await context.emit(ExecutionEventType.ERROR, app_error.to_payload()) + await context.complete("failed", app_error.message[:2000], app_error) + except Exception as exc: + logger.error(f"[LangGraphCodeEngine] Execution {execution_id} failed: {exc}") + app_error = normalize_app_error( + exc, + default_code="CODE_EXECUTION_FAILED", + default_message="Code execution failed", + default_data={"execution_id": str(execution_id)}, + source="engine", + ) + await context.emit(ExecutionEventType.ERROR, app_error.to_payload()) + await context.complete("failed", app_error.message[:2000], app_error) + finally: + self._running.pop(execution_id, None) + if root_span: + try: + root_span.set_output({"status": "completed"}) + root_span.end() + except Exception: + pass + + async def cancel(self, execution_id: uuid.UUID) -> None: + event = self._running.get(execution_id) + if event: + event.set() + logger.info(f"[LangGraphCodeEngine] Cancelled execution {execution_id}") + + async def send_message(self, execution_id: uuid.UUID, message: str) -> None: + if execution_id not in self._running: + raise InternalServiceError( + "No running code execution", + code="CODE_EXECUTION_NOT_RUNNING", + data={"execution_id": str(execution_id)}, + ) + raise InvalidRequestError( + code="CODE_EXECUTION_MESSAGE_UNSUPPORTED", + message="Message injection is not supported for code executions", + data={"execution_id": str(execution_id)}, + ) diff --git a/backend/app/core/engine/copilot_engine.py b/backend/app/core/engine/copilot_engine.py new file mode 100644 index 000000000..bebad0b98 --- /dev/null +++ b/backend/app/core/engine/copilot_engine.py @@ -0,0 +1,242 @@ +""" +Graph Builder Copilot Engine — internal platform engine. + +engine_kind: "build_copilot" + +This is NOT a user-facing agent runtime. It is the AI assistant that helps +users design agent graphs on the visual canvas. It reuses the execution +pipeline (Run → Execution → EventBus → WebSocket) for streaming and +persistence, but no user-created Agent ever has runtime_kind="build_copilot". + +Wraps CopilotService streaming and maps copilot events to ExecutionEvents. 
+""" + +from __future__ import annotations + +import asyncio +import time +import uuid +from typing import Any + +from loguru import logger + +from app.common.app_errors import InternalServiceError, InvalidRequestError, normalize_app_error +from app.core.engine.protocol import EngineCapabilities, ExecutionContext +from app.core.events.event_types import ExecutionEventType + + +class CopilotEngine: + """Graph Builder Copilot engine (internal) — persists copilot events as ExecutionEvents.""" + + engine_kind = "build_copilot" + capabilities = EngineCapabilities( + supports_cancel=True, + supports_message_injection=False, + supports_debug_observation=False, + supports_artifacts=False, + supports_approval=False, + ) + + def __init__(self) -> None: + self._running: dict[uuid.UUID, asyncio.Event] = {} + + async def start( + self, + context: ExecutionContext, + *, + release_runtime_binding: dict[str, Any], + engine_kind: str, + definition_payload: dict[str, Any], + prompt: str, + ) -> None: + """Stream copilot events, mapping each to an ExecutionEvent.""" + + execution_id = context.execution_id + + graph_context = definition_payload.get("graph_context", {}) + conversation_history = definition_payload.get("conversation_history") + mode = definition_payload.get("mode", "deepagents") + provider_name = definition_payload.get("provider_name") + model_name = definition_payload.get("model_name") + user_id = definition_payload.get("user_id") + + logger.info(f"[CopilotEngine] Starting execution {execution_id} mode={mode}") + + cancel_event = asyncio.Event() + self._running[execution_id] = cancel_event + + try: + await context.update_status("running") + await context.emit(ExecutionEventType.EXECUTION_STARTED, {"engine": "build_copilot", "mode": mode}) + + # ------------------------------------------------------------------ + # Observation: create copilot extractor if collector set + # ------------------------------------------------------------------ + copilot_extractor = None + obs_start: float = 0.0 + if context.collector and model_name: + from app.core.observation.instrumentation.copilot_extractor import CopilotObservationExtractor + + copilot_extractor = CopilotObservationExtractor(context.collector, model_name) + obs_start = time.monotonic() + + from app.services.copilot_service import CopilotService + + service = CopilotService( + user_id=user_id, + provider_name=provider_name, + model_name=model_name, + db=context.db, + ) + + result_message = "" + result_actions: list[dict] = [] + + async for event in service.get_copilot_stream( + prompt=prompt, + graph_context=graph_context, + conversation_history=conversation_history, + mode=mode, + graph_id=definition_payload.get("graph_id"), + ): + if cancel_event.is_set(): + await context.complete("cancelled", "Cancelled by user") + return + + event_type = event.get("type", "") + + if event_type == "status": + await context.emit( + ExecutionEventType.COPILOT_STATUS, + { + "stage": event.get("stage"), + "message": event.get("message"), + }, + ) + + elif event_type == "content": + content_text = event.get("content", "") + await context.emit( + ExecutionEventType.COPILOT_CONTENT, + { + "content": content_text, + }, + ) + if copilot_extractor and content_text: + copilot_extractor.accumulate(content_text) + + elif event_type == "thought_step": + await context.emit( + ExecutionEventType.COPILOT_THOUGHT_STEP, + { + "step": event.get("step", {}), + }, + ) + + elif event_type == "tool_call": + await context.emit( + ExecutionEventType.COPILOT_TOOL_CALL, + { + "tool": 
event.get("tool"), + "input": event.get("input", {}), + }, + ) + + elif event_type == "tool_result": + action = event.get("action", {}) + result_actions.append(action) + await context.emit( + ExecutionEventType.COPILOT_TOOL_RESULT, + { + "action": action, + }, + ) + + elif event_type == "result": + result_message = event.get("message", "") + actions = event.get("actions", []) + result_actions = actions if actions else result_actions + await context.emit( + ExecutionEventType.COPILOT_RESULT, + { + "message": result_message, + "actions": result_actions, + }, + ) + + elif event_type == "error": + code = event.get("code") or "COPILOT_EXECUTION_FAILED" + message = event.get("message", "Unknown error") + data = event.get("data") + source = event.get("source", "runtime") + retryable = event.get("retryable", False) + user_action = event.get("user_action") + await context.emit( + ExecutionEventType.ERROR, + { + "message": message, + "code": code, + "data": data, + "source": source, + "retryable": retryable, + **({"user_action": user_action} if user_action else {}), + }, + ) + app_error = InternalServiceError( + message=message, + code=code, + data=data if isinstance(data, dict) else None, + source=source, + retryable=bool(retryable), + ) + await context.complete("failed", message[:2000], app_error) + return + + elif event_type == "done": + pass # handled by complete() below + + # Observation: flush accumulated content + if copilot_extractor: + try: + elapsed_ms = (time.monotonic() - obs_start) * 1000 + await copilot_extractor.flush( + prompt=prompt, + mode=mode, + elapsed_ms=elapsed_ms, + ) + except Exception as obs_exc: + logger.debug(f"[CopilotEngine] Observation flush error: {obs_exc}") + + await context.complete( + "succeeded", + result_message[:2000] if result_message else None, + ) + + except Exception as exc: + logger.error(f"[CopilotEngine] Execution {execution_id} failed: {exc}") + app_error = normalize_app_error( # type: ignore[assignment] + exc, + default_code="COPILOT_EXECUTION_FAILED", + default_message="Copilot execution failed", + default_data={"execution_id": str(execution_id)}, + source="engine", + ) + await context.emit(ExecutionEventType.ERROR, app_error.to_payload()) + await context.complete("failed", app_error.message[:2000], app_error) + finally: + self._running.pop(execution_id, None) + + async def cancel(self, execution_id: uuid.UUID) -> None: + """Cancel a running copilot execution.""" + event = self._running.get(execution_id) + if event: + event.set() + logger.info(f"[CopilotEngine] Cancelled execution {execution_id}") + + async def send_message(self, execution_id: uuid.UUID, message: str) -> None: + """Copilot executions don't support message injection.""" + raise InvalidRequestError( + "Message injection is not supported for copilot executions", + code="COPILOT_EXECUTION_MESSAGE_UNSUPPORTED", + data={"execution_id": str(execution_id)}, + ) diff --git a/backend/app/core/engine/graph_engine.py b/backend/app/core/engine/graph_engine.py new file mode 100644 index 000000000..3169faacd --- /dev/null +++ b/backend/app/core/engine/graph_engine.py @@ -0,0 +1,312 @@ +""" +LangGraph Visual Execution Engine — DeepAgents-based executor. + +engine_kind: "langgraph_visual" +Compiles AgentVersion.definition_payload (nodes/edges) into a DeepAgents graph and executes it. 
+""" + +from __future__ import annotations + +import asyncio +import uuid +from typing import Any + +from loguru import logger +from sqlalchemy import select + +from app.common.app_errors import InternalServiceError, InvalidRequestError, normalize_app_error +from app.core.engine.protocol import EngineCapabilities, ExecutionContext +from app.core.events.event_types import ExecutionEventType + +# --------------------------------------------------------------------------- +# Duck-typed shims — wrap plain dicts from definition_payload into objects +# that builder.py / config.py can consume without touching ORM models. +# --------------------------------------------------------------------------- + + +class _GraphShim: + """Minimal graph-like object derived from execution context + payload.""" + + def __init__( + self, + agent_id: Any, + workspace_id: Any, + variables: dict, + name: str = "", + ) -> None: + self.id = agent_id + self.workspace_id = workspace_id + self.variables = variables + self.name = name + self.title = name + + +class _NodeShim: + """Wraps a node dict from definition_payload['nodes'] into a duck-typed object.""" + + def __init__(self, d: dict) -> None: + self.id = d.get("id") + self.type = d.get("type", "") + self.data = d.get("data", {}) + + +class _EdgeShim: + """Wraps an edge dict from definition_payload['edges'] into a duck-typed object.""" + + def __init__(self, d: dict) -> None: + # Support both snake_case (internal) and camelCase / React-Flow key names + self.source_node_id = d.get("source_node_id") or d.get("source") + self.target_node_id = d.get("target_node_id") or d.get("target") + + +def _extract_message_contents_from_stream_chunk(chunk: Any) -> list[str]: + """Extract assistant text from LangGraph update chunks. + + DeepAgents middleware can emit ``messages=Overwrite([...])`` to replace state. + That is a state update, not a new assistant message, and it is not iterable. 
+ """ + if not isinstance(chunk, dict): + return [] + + contents: list[str] = [] + for node_output in chunk.values(): + if not isinstance(node_output, dict): + continue + + messages = node_output.get("messages", []) + for msg in _iter_stream_messages(messages): + if not _is_assistant_message(msg): + continue + + content = getattr(msg, "content", None) or (msg.get("content") if isinstance(msg, dict) else None) + if content: + contents.append(str(content)) + + return contents + + +def _iter_stream_messages(messages: Any) -> list[Any]: + if _is_overwrite_update(messages): + return [] + if isinstance(messages, list | tuple): + return list(messages) + if isinstance(messages, dict): + return [messages] if "content" in messages else [] + if hasattr(messages, "content"): + return [messages] + return [] + + +def _is_overwrite_update(value: Any) -> bool: + if value.__class__.__name__ == "Overwrite" and hasattr(value, "value"): + return True + return isinstance(value, dict) and set(value.keys()) == {"__overwrite__"} + + +def _is_assistant_message(message: Any) -> bool: + if isinstance(message, dict): + role = message.get("role") or message.get("type") + return role in {"assistant", "ai"} + return getattr(message, "type", None) == "ai" or getattr(message, "role", None) == "assistant" + + +# --------------------------------------------------------------------------- +# Engine +# --------------------------------------------------------------------------- + + +class LangGraphVisualEngine: + """DeepAgents compiler + executor engine.""" + + engine_kind = "langgraph_visual" + capabilities = EngineCapabilities( + supports_cancel=True, + supports_message_injection=False, + supports_debug_observation=True, + supports_artifacts=True, + supports_approval=True, + ) + + def __init__(self) -> None: + self._running: dict[uuid.UUID, Any] = {} + + async def start( + self, + context: ExecutionContext, + *, + release_runtime_binding: dict[str, Any], + engine_kind: str, + definition_payload: dict[str, Any], + prompt: str, + ) -> None: + """Compile graph definition and execute via DeepAgents.""" + + execution_id = context.execution_id + + if engine_kind != "langgraph_visual": + error = InvalidRequestError( + f"LangGraphVisualEngine cannot handle engine_kind={engine_kind}", + code="LANGGRAPH_VISUAL_ENGINE_KIND_MISMATCH", + data={"engine_kind": engine_kind}, + ) + await context.complete("failed", error.message, error) + return + + raw_nodes = definition_payload.get("nodes", []) + raw_edges = definition_payload.get("edges", []) + variables = definition_payload.get("variables", {}) or {} + + if not raw_nodes: + error = InvalidRequestError( + "Graph definition has no nodes", + code="GRAPH_DEFINITION_NODES_EMPTY", + ) + await context.complete("failed", error.message, error) + return + + logger.info(f"[LangGraphVisualEngine] Starting execution {execution_id} with {len(raw_nodes)} nodes") + + cancel_event = asyncio.Event() + self._running[execution_id] = cancel_event + + await context.update_status("running") + await context.emit( + ExecutionEventType.EXECUTION_STARTED, + { + "engine": "langgraph_visual", + "node_count": len(raw_nodes), + "edge_count": len(raw_edges), + }, + ) + + # ------------------------------------------------------------------ + # Resolve user_id and thread_id from the AgentRun linked to this + # execution, so builder.py can initialise memory / sandbox correctly. 
+ # ------------------------------------------------------------------ + user_id: str | None = None + thread_id: str | None = None + try: + from app.models.agent_run import AgentRun + + run = (await context.db.execute(select(AgentRun).where(AgentRun.id == context.run_id))).scalar_one_or_none() + if run: + user_id = run.created_by + thread_id = str(run.thread_id) if run.thread_id else None + except Exception as lookup_exc: # pragma: no cover + logger.warning(f"[LangGraphVisualEngine] Could not resolve user_id/thread_id: {lookup_exc}") + + # ------------------------------------------------------------------ + # Wrap plain dicts into duck-typed shim objects + # ------------------------------------------------------------------ + nodes = [_NodeShim(n) for n in raw_nodes] + edges = [_EdgeShim(e) for e in raw_edges] + graph = _GraphShim( + agent_id=context.run_id, # stable surrogate — run_id is graph-level unique here + workspace_id=context.workspace_id, + variables=variables, + name=definition_payload.get("name", ""), + ) + + # ------------------------------------------------------------------ + # Observation: create root span + callback handler if collector set + # ------------------------------------------------------------------ + root_span = None + obs_handler = None + if context.collector: + graph_name = definition_payload.get("name", "graph") + root_span = context.collector.start_agent(name=f"root:{graph_name}") + obs_handler = context.collector.create_langchain_handler() + + try: + # ------------------------------------------------------------------ + # Build deep-agents graph + # ------------------------------------------------------------------ + from app.core.graph.deep_agents.builder import build_deep_agents_graph + from app.services.model_service import ModelService + + model_service = ModelService(context.db) + + compiled = await build_deep_agents_graph( + graph, + nodes, + edges, + user_id=user_id, + model_service=model_service, + thread_id=thread_id, + ) + + # ------------------------------------------------------------------ + # Run the compiled agent and stream events back through context + # ------------------------------------------------------------------ + stream_config: dict[str, Any] = { + "configurable": {"thread_id": thread_id or str(execution_id)}, + } + if obs_handler: + stream_config["callbacks"] = [obs_handler] + + result_text: str = "" + async for chunk in compiled.astream( + {"messages": [{"role": "user", "content": prompt}]}, + stream_config, + ): + if cancel_event.is_set(): + await context.complete("cancelled", "Execution cancelled by user") + return + + # deepagents yields dicts keyed by node name; extract text chunks + for content in _extract_message_contents_from_stream_chunk(chunk): + result_text = content + await context.emit(ExecutionEventType.ASSISTANT_TEXT, {"content": result_text}) + + # ------------------------------------------------------------------ + # Cleanup sandbox if the compiled agent holds one + # ------------------------------------------------------------------ + sandbox_handle = getattr(compiled, "_sandbox_handle", None) + if sandbox_handle: + try: + await sandbox_handle.release() + except Exception as cleanup_exc: # pragma: no cover + logger.warning(f"[LangGraphVisualEngine] Sandbox cleanup failed: {cleanup_exc}") + + await context.complete("succeeded", result_text[:2000] if result_text else None) + + except Exception as exc: + logger.error(f"[LangGraphVisualEngine] Execution {execution_id} failed: {exc}") + app_error = 
normalize_app_error( + exc, + default_code="GRAPH_EXECUTION_FAILED", + default_message="Graph execution failed", + default_data={"execution_id": str(execution_id)}, + source="engine", + ) + await context.emit(ExecutionEventType.ERROR, app_error.to_payload()) + await context.complete("failed", app_error.message[:2000], app_error) + finally: + self._running.pop(execution_id, None) + if root_span: + try: + root_span.set_output({"status": "completed"}) + root_span.end() + except Exception: + pass + + async def cancel(self, execution_id: uuid.UUID) -> None: + """Cancel a running graph execution.""" + event = self._running.get(execution_id) + if event: + event.set() + logger.info(f"[LangGraphVisualEngine] Cancelled execution {execution_id}") + + async def send_message(self, execution_id: uuid.UUID, message: str) -> None: + """Graph executions don't support message injection (yet).""" + if execution_id not in self._running: + raise InternalServiceError( + "No running graph execution", + code="GRAPH_EXECUTION_NOT_RUNNING", + data={"execution_id": str(execution_id)}, + ) + raise InvalidRequestError( + "Message injection is not yet supported for graph executions", + code="GRAPH_EXECUTION_MESSAGE_UNSUPPORTED", + data={"execution_id": str(execution_id)}, + ) diff --git a/backend/app/core/engine/protocol.py b/backend/app/core/engine/protocol.py new file mode 100644 index 000000000..280267a88 --- /dev/null +++ b/backend/app/core/engine/protocol.py @@ -0,0 +1,121 @@ +""" +Execution engine protocol — the stable abstraction layer. + +All execution engines implement this protocol. +The orchestrator dispatches to engines via the registry; engines emit events via context. +""" + +from __future__ import annotations + +import uuid +from dataclasses import dataclass, field +from typing import Any, Protocol, runtime_checkable + +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AppError +from app.core.events.event_types import ExecutionEventType + + +@dataclass +class ExecutionContext: + """ + Provided to every engine at start time. + + Engines use this to emit events, update status, and signal completion. + The context handles persistence (ExecutionEvent rows) and real-time push + (WebSocket broadcast) — engines never touch those layers directly. 
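+
+    Typical engine flow (a sketch mirroring LangGraphVisualEngine.start)::
+
+        await context.update_status("running")
+        await context.emit(ExecutionEventType.EXECUTION_STARTED, {"engine": "..."})
+        # ... engine-specific work, emitting events along the way ...
+        await context.complete("succeeded", result_summary)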
+    """

+    db: AsyncSession
+    execution_id: uuid.UUID
+    run_id: uuid.UUID
+    workspace_id: uuid.UUID
+    credentials: dict[str, str] = field(default_factory=dict)
+    auto_approve: bool = True
+    metadata: dict[str, Any] = field(default_factory=dict)
+    debug: bool = False
+    collector: Any = None  # ObservationCollector | None; type import avoided to prevent a circular dependency
+
+    # ---- set by orchestrator after construction ----
+    _emit_fn: Any = None  # async (event_type, payload) -> None
+    _status_fn: Any = None  # async (status) -> None
+    _complete_fn: Any = None  # async (status, result_summary, error) -> None
+
+    async def emit(self, event_type: ExecutionEventType, payload: dict | None = None) -> None:
+        """Emit an execution event → persisted + broadcast."""
+        if self._emit_fn:
+            await self._emit_fn(event_type, payload or {})
+
+    async def update_status(self, status: str) -> None:
+        """Update Execution.status without completing."""
+        if self._status_fn:
+            await self._status_fn(status)
+
+    async def complete(
+        self,
+        status: str,
+        result_summary: str | None = None,
+        error: AppError | None = None,
+    ) -> None:
+        """Mark execution as terminal → updates Run + Task status."""
+        if self._complete_fn:
+            await self._complete_fn(status, result_summary, error)
+
+
+@dataclass(frozen=True)
+class EngineCapabilities:
+    supports_cancel: bool = False
+    supports_message_injection: bool = False
+    supports_debug_observation: bool = False
+    supports_artifacts: bool = False
+    supports_approval: bool = False
+
+
+@runtime_checkable
+class ExecutionEngine(Protocol):
+    """
+    Stable interface for all execution engines.
+
+    User-facing engines:
+    - LangGraphVisualEngine (engine_kind: langgraph_visual)
+    - LangGraphCodeEngine (engine_kind: langgraph_code)
+    - ClaudeCodeEngine (engine_kind: claude_code)
+    - CodexEngine (engine_kind: codex)
+    - OpenClawEngine (engine_kind: openclaw)
+
+    Internal platform engines:
+    - CopilotEngine (engine_kind: build_copilot)
+    """
+
+    engine_kind: str
+    capabilities: EngineCapabilities
+
+    async def start(
+        self,
+        context: ExecutionContext,
+        *,
+        release_runtime_binding: dict[str, Any],
+        engine_kind: str,
+        definition_payload: dict[str, Any],
+        prompt: str,
+    ) -> None:
+        """
+        Start execution. Events flow through context.emit().
+
+        Args:
+            context: execution context with emit/status/complete callbacks
+            release_runtime_binding: from AgentRelease.runtime_binding
+            engine_kind: "langgraph_visual" | "langgraph_code" | "claude_code" | "codex" | "openclaw"
+            definition_payload: from AgentVersion.definition_payload
+            prompt: the user prompt or task goal
+        """
+        ...
+
+    async def cancel(self, execution_id: uuid.UUID) -> None:
+        """Cancel a running execution."""
+        ...
+
+    async def send_message(self, execution_id: uuid.UUID, message: str) -> None:
+        """Inject a human message into a running execution."""
+        ...
diff --git a/backend/app/core/engine/registry.py b/backend/app/core/engine/registry.py
new file mode 100644
index 000000000..d66637292
--- /dev/null
+++ b/backend/app/core/engine/registry.py
@@ -0,0 +1,38 @@
+"""
+Engine registry — maps engine_kind to ExecutionEngine instances.
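+
+Illustrative usage, with the engine class named in protocol.py::
+
+    engine_registry.register("langgraph_visual", LangGraphVisualEngine())
+    engine = engine_registry.get("langgraph_visual")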
+""" + +from __future__ import annotations + +from app.common.app_errors import NotFoundError +from app.core.engine.protocol import ExecutionEngine + + +class EngineRegistry: + """Singleton registry: engine_kind → ExecutionEngine.""" + + def __init__(self) -> None: + self._engines: dict[str, ExecutionEngine] = {} + + def register(self, engine_kind: str, engine: ExecutionEngine) -> None: + self._engines[engine_kind] = engine + + def has(self, engine_kind: str) -> bool: + return engine_kind in self._engines + + def get(self, engine_kind: str) -> ExecutionEngine: + engine = self._engines.get(engine_kind) + if not engine: + available = ", ".join(self._engines.keys()) or "(none)" + raise NotFoundError( + "Execution engine is not registered", + code="EXECUTION_ENGINE_NOT_REGISTERED", + data={"engine_kind": engine_kind, "available_engine_kinds": available}, + ) + return engine + + def list_kinds(self) -> list[str]: + return list(self._engines.keys()) + + +engine_registry = EngineRegistry() diff --git a/backend/app/core/events/__init__.py b/backend/app/core/events/__init__.py new file mode 100644 index 000000000..f23b5c934 --- /dev/null +++ b/backend/app/core/events/__init__.py @@ -0,0 +1,15 @@ +"""Events infrastructure — unified execution event bus.""" + +from app.core.events.bus import ExecutionEventBus, execution_event_bus +from app.core.events.envelope import ExecutionEventEnvelope +from app.core.events.event_types import ExecutionEventType +from app.core.events.subscriber import EventSubscriber, SubscriberPhase + +__all__ = [ + "ExecutionEventBus", + "ExecutionEventEnvelope", + "ExecutionEventType", + "EventSubscriber", + "SubscriberPhase", + "execution_event_bus", +] diff --git a/backend/app/core/events/bus.py b/backend/app/core/events/bus.py new file mode 100644 index 000000000..9585637d2 --- /dev/null +++ b/backend/app/core/events/bus.py @@ -0,0 +1,78 @@ +""" +ExecutionEventBus — unified event pipeline. + +Phase 1 subscribers share the caller's DB session and run sequentially. +The bus commits once after all Phase 1 subscribers complete. + +Phase 2 subscribers run in parallel with independent sessions. +A failure in one does not affect the others. +""" + +from __future__ import annotations + +import asyncio +from typing import TYPE_CHECKING + +from loguru import logger + +from app.core.events.subscriber import EventSubscriber, SubscriberPhase + +if TYPE_CHECKING: + from sqlalchemy.ext.asyncio import AsyncSession + + from app.core.events.envelope import ExecutionEventEnvelope + + +class ExecutionEventBus: + def __init__(self) -> None: + self._persist_subs: list[EventSubscriber] = [] + self._broadcast_subs: list[EventSubscriber] = [] + + def register(self, sub: EventSubscriber) -> None: + if sub.phase == SubscriberPhase.PERSIST: + self._persist_subs.append(sub) + else: + self._broadcast_subs.append(sub) + logger.info(f"[EventBus] Registered subscriber: {sub.name} (phase={sub.phase.name})") + + async def publish(self, envelope: ExecutionEventEnvelope, db: AsyncSession) -> None: + # Phase 1: shared transaction, sequential + for sub in self._persist_subs: + await sub.handle(envelope, db=db) + await db.commit() + + # Phase 2: independent sessions, parallel fan-out + await self._fan_out([envelope]) + + async def publish_batch( + self, + envelopes: list[ExecutionEventEnvelope], + db: AsyncSession, + ) -> None: + """Publish multiple envelopes in a single transaction. + + Phase 1 processes all envelopes sequentially, then commits once. + Phase 2 fans out all envelopes in parallel. 
+ """ + for envelope in envelopes: + for sub in self._persist_subs: + await sub.handle(envelope, db=db) + await db.commit() + + await self._fan_out(envelopes) + + async def _fan_out(self, envelopes: list[ExecutionEventEnvelope]) -> None: + if not self._broadcast_subs: + return + tasks = [sub.handle(envelope) for envelope in envelopes for sub in self._broadcast_subs] + results = await asyncio.gather(*tasks, return_exceptions=True) + for i, result in enumerate(results): + if isinstance(result, Exception): + sub_idx = i % len(self._broadcast_subs) + logger.warning( + f"[EventBus] {self._broadcast_subs[sub_idx].name} failed: {result}", + exc_info=result, + ) + + +execution_event_bus = ExecutionEventBus() diff --git a/backend/app/core/events/envelope.py b/backend/app/core/events/envelope.py new file mode 100644 index 000000000..5f3a44a2f --- /dev/null +++ b/backend/app/core/events/envelope.py @@ -0,0 +1,42 @@ +""" +Canonical event envelope — the single shape all subscribers receive. +""" + +from __future__ import annotations + +import uuid +from dataclasses import dataclass, field +from datetime import datetime +from typing import Any, Optional + +from app.core.events.event_types import ExecutionEventType +from app.utils.datetime import utc_now + + +@dataclass +class ExecutionEventEnvelope: + """Canonical event envelope flowing through the event bus.""" + + execution_id: uuid.UUID + run_id: uuid.UUID + workspace_id: uuid.UUID + event_type: ExecutionEventType | str + payload: dict[str, Any] = field(default_factory=dict) + created_at: datetime = field(default_factory=utc_now) + seq: int = 0 # filled by PersistenceSubscriber in Phase 1 + + # Run metadata — subscribers use these for routing decisions + trigger_medium: Optional[str] = None + run_purpose: Optional[str] = None + thread_id: Optional[uuid.UUID] = None + task_id: Optional[uuid.UUID] = None + + # Completion-only fields + terminal_status: Optional[str] = None + result_summary: Optional[str] = None + error: Optional[dict[str, Any]] = None # ErrorDescriptor via AppError.to_payload() + + # Status-change fields (used by execution_status_change events) + target_status: Optional[str] = None + container_id: Optional[str] = None + metrics: Optional[dict[str, Any]] = None diff --git a/backend/app/core/events/event_types.py b/backend/app/core/events/event_types.py new file mode 100644 index 000000000..941b39811 --- /dev/null +++ b/backend/app/core/events/event_types.py @@ -0,0 +1,36 @@ +""" +Canonical execution event types — the single source of truth for event naming. + +Backend emitters MUST use these constants. The frontend type union mirrors them. 
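+
+Example emitter call (mirroring the engines)::
+
+    await context.emit(ExecutionEventType.ASSISTANT_TEXT, {"content": text})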
+""" + +from __future__ import annotations + +from enum import StrEnum + + +class ExecutionEventType(StrEnum): + # Content events (mapped from CLI message types by ExecutionRunner) + ASSISTANT_TEXT = "assistant_text" + THINKING = "thinking" + TOOL_USE_START = "tool_use_start" + TOOL_USE_END = "tool_use_end" + ERROR = "error" + ARTIFACT_CREATED = "artifact_created" + APPROVAL_REQUESTED = "approval_requested" + APPROVAL_RESOLVED = "approval_resolved" + USER_MESSAGE = "user_message" + + # Lifecycle events + EXECUTION_STARTED = "execution_started" + EXECUTION_COMPLETED = "execution_completed" + EXECUTION_STATUS_CHANGE = "execution_status_change" + RUN_STATUS_CHANGE = "run_status_change" + + # Copilot events (mapped from CopilotService stream events by CopilotEngine) + COPILOT_STATUS = "copilot_status" + COPILOT_CONTENT = "copilot_content" + COPILOT_THOUGHT_STEP = "copilot_thought_step" + COPILOT_TOOL_CALL = "copilot_tool_call" + COPILOT_TOOL_RESULT = "copilot_tool_result" + COPILOT_RESULT = "copilot_result" diff --git a/backend/app/core/events/subscriber.py b/backend/app/core/events/subscriber.py new file mode 100644 index 000000000..6a4727a3c --- /dev/null +++ b/backend/app/core/events/subscriber.py @@ -0,0 +1,30 @@ +""" +Subscriber protocol and phase enum. +""" + +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING, Optional, Protocol, runtime_checkable + +if TYPE_CHECKING: + from sqlalchemy.ext.asyncio import AsyncSession + + from app.core.events.envelope import ExecutionEventEnvelope + + +class SubscriberPhase(Enum): + PERSIST = 1 + BROADCAST = 2 + + +@runtime_checkable +class EventSubscriber(Protocol): + name: str + phase: SubscriberPhase + + async def handle( + self, + envelope: ExecutionEventEnvelope, + db: Optional[AsyncSession] = None, + ) -> None: ... diff --git a/backend/app/core/events/subscribers/__init__.py b/backend/app/core/events/subscribers/__init__.py new file mode 100644 index 000000000..4aa51e593 --- /dev/null +++ b/backend/app/core/events/subscribers/__init__.py @@ -0,0 +1 @@ +"""Event subscribers.""" diff --git a/backend/app/core/events/subscribers/persistence.py b/backend/app/core/events/subscribers/persistence.py new file mode 100644 index 000000000..24c295c4e --- /dev/null +++ b/backend/app/core/events/subscribers/persistence.py @@ -0,0 +1,79 @@ +""" +PersistenceSubscriber — Phase 1. + +Writes ExecutionEvent rows to the database and fills envelope.seq. +Flushes but does NOT commit — the bus commits after all Phase 1 subscribers. +""" + +from __future__ import annotations + +from collections import defaultdict +from typing import Optional + +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InternalServiceError +from app.core.events.envelope import ExecutionEventEnvelope +from app.core.events.event_types import ExecutionEventType +from app.core.events.subscriber import SubscriberPhase +from app.models.execution import ExecutionEvent + + +class PersistenceSubscriber: + name = "persistence" + phase = SubscriberPhase.PERSIST + + def __init__(self) -> None: + # single-process sequence cache: + # This in-memory counter avoids a MAX() query on every event and is safe + # only when one backend process owns event writes for an execution and + # those writes are serialized/single-writer per execution. Multi-worker + # or multi-instance deployments need distributed event sequencing before + # this cache can be treated as globally safe. 
+ self._seq_cache: dict[str, int] = defaultdict(int) + + async def handle( + self, + envelope: ExecutionEventEnvelope, + db: Optional[AsyncSession] = None, + ) -> None: + if db is None: + raise InternalServiceError( + "Persistence subscriber requires a database session", + code="EVENT_SUBSCRIBER_DB_SESSION_MISSING", + data={"subscriber": self.name}, + ) + + eid = str(envelope.execution_id) + + # Seed cache on first event for this execution + if eid not in self._seq_cache: + max_seq = ( + await db.execute( + select(func.coalesce(func.max(ExecutionEvent.sequence_no), 0)).where( + ExecutionEvent.execution_id == envelope.execution_id + ) + ) + ).scalar() + self._seq_cache[eid] = max_seq or 0 + + self._seq_cache[eid] += 1 + seq = self._seq_cache[eid] + + event = ExecutionEvent( + execution_id=envelope.execution_id, + sequence_no=seq, + event_type=envelope.event_type, + payload=envelope.payload, + ) + db.add(event) + await db.flush() + + # Fill seq so Phase 2 subscribers can use it. + # Safe: Phase 2 runs only after Phase 1 completes and bus commits. + envelope.seq = seq + + # Clean up cache on terminal events + if envelope.event_type == ExecutionEventType.EXECUTION_COMPLETED: + self._seq_cache.pop(eid, None) diff --git a/backend/app/core/events/subscribers/state_transition.py b/backend/app/core/events/subscribers/state_transition.py new file mode 100644 index 000000000..69a89ab6c --- /dev/null +++ b/backend/app/core/events/subscribers/state_transition.py @@ -0,0 +1,141 @@ +""" +StateTransitionSubscriber — Phase 1. + +Handles execution and run state transitions driven by events: +- execution_status_change → non-terminal transitions (dispatched, running, approval_wait) +- execution_completed → terminal transitions (succeeded, failed, cancelled) for both Execution and Run +- run_status_change → direct Run transitions (e.g. running, cancelled, reaper-failed) + +Flushes but does NOT commit — the bus commits once after all Phase 1 subscribers. 
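+
+An execution_completed envelope, for illustration::
+
+    ExecutionEventEnvelope(
+        execution_id=execution_id,
+        run_id=run_id,
+        workspace_id=workspace_id,
+        event_type=ExecutionEventType.EXECUTION_COMPLETED,
+        terminal_status="succeeded",
+    )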
+""" + +from __future__ import annotations + +from typing import Optional + +from loguru import logger +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InternalServiceError +from app.core.events.envelope import ExecutionEventEnvelope +from app.core.events.event_types import ExecutionEventType +from app.core.events.subscriber import SubscriberPhase +from app.core.state_machines.definitions import EXECUTION_TERMINAL +from app.core.state_machines.engine import InvalidTransition +from app.core.state_machines.transitions import transition_execution, transition_run +from app.models.agent_run import AgentRun +from app.models.execution import Execution + +_HANDLED = { + ExecutionEventType.EXECUTION_STATUS_CHANGE, + ExecutionEventType.EXECUTION_COMPLETED, + ExecutionEventType.RUN_STATUS_CHANGE, +} + + +class StateTransitionSubscriber: + name = "state_transition" + phase = SubscriberPhase.PERSIST + + async def handle( + self, + envelope: ExecutionEventEnvelope, + db: Optional[AsyncSession] = None, + ) -> None: + if envelope.event_type not in _HANDLED: + return + + if db is None: + raise InternalServiceError( + "State transition subscriber requires a database session", + code="EVENT_SUBSCRIBER_DB_SESSION_MISSING", + data={"subscriber": self.name}, + ) + + if envelope.event_type == ExecutionEventType.EXECUTION_STATUS_CHANGE: + await self._handle_status_change(envelope, db) + elif envelope.event_type == ExecutionEventType.EXECUTION_COMPLETED: + await self._handle_completed(envelope, db) + elif envelope.event_type == ExecutionEventType.RUN_STATUS_CHANGE: + await self._handle_run_status_change(envelope, db) + + async def _handle_status_change(self, envelope: ExecutionEventEnvelope, db: AsyncSession) -> None: + if not envelope.target_status: + raise InternalServiceError( + "Execution status change event is missing target status", + code="EVENT_TARGET_STATUS_MISSING", + data={"event_type": str(envelope.event_type), "execution_id": str(envelope.execution_id)}, + ) + + if envelope.target_status in EXECUTION_TERMINAL: + raise InternalServiceError( + "Terminal execution status must use execution completed events", + code="EVENT_TERMINAL_STATUS_INVALID", + data={ + "event_type": str(envelope.event_type), + "execution_id": str(envelope.execution_id), + "target_status": envelope.target_status, + }, + ) + + execution = (await db.execute(select(Execution).where(Execution.id == envelope.execution_id))).scalar_one() + + try: + await transition_execution(execution, envelope.target_status, db) + except InvalidTransition: + logger.warning(f"[StateTransition] Skipping execution {execution.id}: already {execution.status}") + return + + self._apply_metadata(execution, envelope) + await db.flush() + + async def _handle_completed(self, envelope: ExecutionEventEnvelope, db: AsyncSession) -> None: + if not envelope.terminal_status: + raise InternalServiceError( + "Execution completed event is missing terminal status", + code="EVENT_TERMINAL_STATUS_MISSING", + data={"event_type": str(envelope.event_type), "execution_id": str(envelope.execution_id)}, + ) + + execution = (await db.execute(select(Execution).where(Execution.id == envelope.execution_id))).scalar_one() + try: + await transition_execution(execution, envelope.terminal_status, db) + except InvalidTransition: + logger.warning(f"[StateTransition] Skipping execution {execution.id}: already {execution.status}") + return + + self._apply_metadata(execution, envelope) + + run = (await 
db.execute(select(AgentRun).where(AgentRun.id == envelope.run_id))).scalar_one() + try: + await transition_run(run, envelope.terminal_status, db, envelope.result_summary) + except InvalidTransition: + logger.warning(f"[StateTransition] Skipping run {run.id}: already {run.status}") + + await db.flush() + + async def _handle_run_status_change(self, envelope: ExecutionEventEnvelope, db: AsyncSession) -> None: + if not envelope.target_status: + raise InternalServiceError( + "Run status change event is missing target status", + code="EVENT_TARGET_STATUS_MISSING", + data={"event_type": str(envelope.event_type), "run_id": str(envelope.run_id)}, + ) + + run = (await db.execute(select(AgentRun).where(AgentRun.id == envelope.run_id))).scalar_one() + + try: + await transition_run(run, envelope.target_status, db, envelope.result_summary) + except InvalidTransition: + logger.warning(f"[StateTransition] Skipping run {run.id}: already {run.status}") + + @staticmethod + def _apply_metadata(execution: Execution, envelope: ExecutionEventEnvelope) -> None: + """Write optional metadata fields from the envelope onto the execution row.""" + if envelope.error is not None: + execution.error = envelope.error + if envelope.container_id is not None: + execution.runtime_session_ref = envelope.container_id + if envelope.metrics is not None: + execution.metrics = envelope.metrics diff --git a/backend/app/core/events/subscribers/task_sync.py b/backend/app/core/events/subscribers/task_sync.py new file mode 100644 index 000000000..b2b0a4c90 --- /dev/null +++ b/backend/app/core/events/subscribers/task_sync.py @@ -0,0 +1,56 @@ +""" +TaskSyncSubscriber — Phase 2. + +On execution_completed or run terminal status change, syncs the task status +from the run. Uses an independent DB session. 
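+
+Runs in Phase 2 (BROADCAST), so a failure here cannot roll back the Phase 1
+persistence and state-transition writes.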
+""" + +from __future__ import annotations + +from typing import Optional + +from loguru import logger +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.core.events.envelope import ExecutionEventEnvelope +from app.core.events.event_types import ExecutionEventType +from app.core.events.subscriber import SubscriberPhase +from app.core.state_machines.definitions import RUN_TERMINAL + +_HANDLED = { + ExecutionEventType.EXECUTION_COMPLETED, + ExecutionEventType.RUN_STATUS_CHANGE, +} + + +class TaskSyncSubscriber: + name = "task_sync" + phase = SubscriberPhase.BROADCAST + + async def handle( + self, + envelope: ExecutionEventEnvelope, + db: Optional[AsyncSession] = None, + ) -> None: + if envelope.event_type not in _HANDLED: + return + + if envelope.event_type == ExecutionEventType.RUN_STATUS_CHANGE: + if envelope.target_status not in RUN_TERMINAL: + return + + from app.core.database import AsyncSessionLocal + from app.core.state_machines.transitions import sync_task_from_run + from app.models.agent_run import AgentRun + + async with AsyncSessionLocal() as session: + run = (await session.execute(select(AgentRun).where(AgentRun.id == envelope.run_id))).scalar_one_or_none() + if not run: + logger.warning(f"[TaskSync] Run {envelope.run_id} not found") + return + if not run.task_id: + return + await sync_task_from_run(run, session) + await session.commit() + logger.info(f"[TaskSync] Synced task {run.task_id} from run {envelope.run_id}") diff --git a/backend/app/core/events/subscribers/websocket.py b/backend/app/core/events/subscribers/websocket.py new file mode 100644 index 000000000..7a03db8c3 --- /dev/null +++ b/backend/app/core/events/subscribers/websocket.py @@ -0,0 +1,59 @@ +""" +WebSocketSubscriber — Phase 2. + +Broadcasts events to frontend clients via the existing subscription manager. 
+""" + +from __future__ import annotations + +from typing import Optional + +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InternalServiceError +from app.core.events.envelope import ExecutionEventEnvelope +from app.core.events.event_types import ExecutionEventType +from app.core.events.subscriber import SubscriberPhase +from app.websocket.execution_subscription_manager import execution_subscription_manager + + +class WebSocketSubscriber: + name = "websocket" + phase = SubscriberPhase.BROADCAST + + async def handle( + self, + envelope: ExecutionEventEnvelope, + db: Optional[AsyncSession] = None, + ) -> None: + eid = str(envelope.execution_id) + + if envelope.event_type == ExecutionEventType.EXECUTION_COMPLETED: + payload: dict[str, object] = { + "type": "execution_completed", + "execution_id": eid, + "run_id": str(envelope.run_id), + "status": envelope.terminal_status, + } + if envelope.terminal_status == "failed": + if envelope.error is None: + raise InternalServiceError( + "Failed execution completed event is missing error payload", + code="EVENT_ERROR_PAYLOAD_MISSING", + data={"event_type": str(envelope.event_type), "execution_id": eid}, + ) + payload["error"] = envelope.error + await execution_subscription_manager.broadcast_event(eid, payload) + execution_subscription_manager.remove_execution(eid) + else: + await execution_subscription_manager.broadcast_event( + eid, + { + "type": "event", + "execution_id": eid, + "seq": envelope.seq, + "event_type": envelope.event_type, + "payload": envelope.payload, + "created_at": envelope.created_at.isoformat() if envelope.created_at else None, + }, + ) diff --git a/backend/app/core/graph/deep_agents/builder.py b/backend/app/core/graph/deep_agents/builder.py index a39f716a8..3727ccb0c 100644 --- a/backend/app/core/graph/deep_agents/builder.py +++ b/backend/app/core/graph/deep_agents/builder.py @@ -7,11 +7,14 @@ from __future__ import annotations import asyncio -from typing import Any, List, Optional + +# app.models.graph was removed; use Protocol stubs for type-checking only. +# At runtime, duck-typed shim objects (from graph_engine.py) satisfy these contracts. 
+from typing import TYPE_CHECKING, Any, List, Optional from loguru import logger -from app.common.exceptions import AppException +from app.common.app_errors import ServiceUnavailableError from app.core.agent.backends.constants import DOCKER_UNAVAILABLE_MSG from app.core.agent.backends.docker_check import is_docker_available from app.core.graph.deep_agents.agent_factory import ( @@ -29,15 +32,35 @@ ) from app.core.graph.deep_agents.tool_resolver import resolve_tools from app.core.graph.runtime_prompt_template import build_runtime_prompt_context, render_runtime_template -from app.models.graph import AgentGraph, GraphEdge, GraphNode + +if TYPE_CHECKING: + from typing import Protocol + + class GraphLike(Protocol): + id: Any + workspace_id: Any + variables: Any + + class NodeLike(Protocol): + id: Any + type: str + data: dict + + class EdgeLike(Protocol): + source_node_id: Any + target_node_id: Any + + AgentGraph = GraphLike # type: ignore[assignment] + GraphNode = NodeLike # type: ignore[assignment] + GraphEdge = EdgeLike # type: ignore[assignment] LOG_PREFIX = "[DeepAgentsBuilder]" async def build_deep_agents_graph( - graph: AgentGraph, - nodes: List[GraphNode], - edges: List[GraphEdge], + graph: Any, + nodes: List[Any], + edges: List[Any], user_id: Optional[Any] = None, model_service: Optional[Any] = None, thread_id: Optional[str] = None, @@ -89,8 +112,7 @@ async def build_deep_agents_graph( for cfg in all_configs if cfg.node_type == "code_agent" and cfg.executor_type == "docker" ] - raise AppException( - status_code=503, + raise ServiceUnavailableError( message=( f"{DOCKER_UNAVAILABLE_MSG} " f'Agent "{graph_name}" requires Docker for node(s): {", ".join(docker_nodes)}.' diff --git a/backend/app/core/graph/deep_agents/config.py b/backend/app/core/graph/deep_agents/config.py index 606da364e..613c69765 100644 --- a/backend/app/core/graph/deep_agents/config.py +++ b/backend/app/core/graph/deep_agents/config.py @@ -1,8 +1,12 @@ """Node configuration resolution — pure data extraction, no side effects. -Reads raw GraphNode data and produces typed config dataclasses. +Reads raw node data objects and produces typed config dataclasses. Does NOT resolve models, tools, skills, or middleware — those are handled by dedicated resolvers during the build phase. + +Node data objects are duck-typed: any object with ``id``, ``type``, and +``data`` attributes is accepted (previously GraphNode ORM instances, +now plain data holders from AgentVersion.definition_payload). """ from __future__ import annotations @@ -10,8 +14,6 @@ from dataclasses import dataclass, field from typing import Any, Dict, List, Optional -from app.models.graph import GraphNode - @dataclass class NodeConfig: @@ -64,8 +66,8 @@ def display_name(self) -> str: return self.label or self.name -def resolve_node_config(node: GraphNode, node_name: str) -> NodeConfig: - """Extract typed config from a GraphNode. Pure function, no side effects.""" +def resolve_node_config(node: Any, node_name: str) -> NodeConfig: + """Extract typed config from a node data object. Pure function, no side effects.""" data = node.data or {} config = data.get("config", {}) or {} node_type = data.get("type") or node.type or "agent" @@ -108,7 +110,7 @@ def resolve_node_config(node: GraphNode, node_name: str) -> NodeConfig: def resolve_all_configs( - nodes: list[GraphNode], + nodes: list[Any], edges: list, ) -> tuple[Optional[NodeConfig], list[NodeConfig]]: """Resolve configs for all nodes. Returns (root_config, child_configs). 
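
For reference, resolve_node_config only needs ``id`` / ``type`` / ``data``
attributes, so a minimal sketch (illustrative values) is:

    from types import SimpleNamespace

    raw = {"id": "n1", "type": "agent", "data": {"type": "agent", "config": {"systemPrompt": "..."}}}
    cfg = resolve_node_config(SimpleNamespace(**raw), node_name="n1")
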
diff --git a/backend/app/core/graph/deep_agents/model_resolver.py b/backend/app/core/graph/deep_agents/model_resolver.py index 8b8916d9d..d5d3b715e 100644 --- a/backend/app/core/graph/deep_agents/model_resolver.py +++ b/backend/app/core/graph/deep_agents/model_resolver.py @@ -10,9 +10,8 @@ from loguru import logger -from app.common.exceptions import ModelConfigError +from app.common.app_errors import ModelConfigError from app.core.graph.deep_agents import format_node_ctx -from app.services.model_service import MODEL_NOT_FOUND class ModelResolver: @@ -78,7 +77,7 @@ async def _resolve_uncached( available = await self._list_available_model_names() ctx = format_node_ctx(node_label, graph_name) raise ModelConfigError( - MODEL_NOT_FOUND, + ModelConfigError.MODEL_NOT_FOUND, f'Model "{model_name}" is not available ({ctx}).', params={ "model": model_name or "", diff --git a/backend/app/core/graph/node_secrets.py b/backend/app/core/graph/node_secrets.py index 8981c0e67..327f4642a 100644 --- a/backend/app/core/graph/node_secrets.py +++ b/backend/app/core/graph/node_secrets.py @@ -1,10 +1,13 @@ """ -Graph node secrets: encrypt a2a_auth_headers and store by reference. +Graph node secrets: encrypt a2a_auth_headers and store inline. -- On save: if node has plain a2a_auth_headers, encrypt and store in graph_node_secrets, - replace in node.data.config with {"__secretRef": ""}. -- On load for execution: resolve __secretRef to decrypted headers (in-memory only). -- GET /state never returns decrypted headers; frontend sees __secretRef or redacted. +The GraphNode / GraphNodeSecret ORM models have been removed. +Node secrets are now stored inline in AgentVersion.definition_payload +under definition_payload["node_secrets"][str(node_id)] as an encrypted +string. No database table or UUID secret reference is involved. + +The helper functions below operate on plain dicts (node data payloads) +and are used by the graph builder at compile time. """ import copy @@ -12,11 +15,8 @@ from typing import Any, Dict, List, Optional from loguru import logger -from sqlalchemy import select -from sqlalchemy.ext.asyncio import AsyncSession from app.core.model.utils import decrypt_credentials, encrypt_credentials -from app.models.graph import GraphNode, GraphNodeSecret SECRET_KEY_SLUG = "a2a_auth_headers" REF_KEY = "__secretRef" @@ -38,45 +38,57 @@ def _normalize_headers(raw: Any) -> Optional[Dict[str, str]]: return None -async def store_a2a_auth_headers( - db: AsyncSession, - graph_id: uuid.UUID, - node_id: uuid.UUID, +def store_a2a_auth_headers( + payload_secrets: Dict[str, Any], + node_id: str | uuid.UUID, headers: Dict[str, str], -) -> uuid.UUID: - """Encrypt and store headers; return the secret row id (for __secretRef).""" +) -> None: + """Encrypt *headers* and store them inline in *payload_secrets*. + + ``payload_secrets`` is ``definition_payload["node_secrets"]`` — a plain + dict that lives inside AgentVersion.definition_payload. The encrypted + string is keyed by ``str(node_id)``. 
+ + Example:: + + payload_secrets = definition_payload.setdefault("node_secrets", {}) + store_a2a_auth_headers(payload_secrets, node_id, headers) + """ if not headers: - raise ValueError("headers must be non-empty") + return encrypted = encrypt_credentials(headers) - row = GraphNodeSecret( - graph_id=graph_id, - node_id=node_id, - key_slug=SECRET_KEY_SLUG, - encrypted_value=encrypted, - ) - db.add(row) - await db.flush() - return row.id - - -async def resolve_a2a_auth_headers(db: AsyncSession, secret_id: uuid.UUID) -> Optional[Dict[str, str]]: - """Load and decrypt headers by secret id. Returns None if not found or invalid.""" - result = await db.execute(select(GraphNodeSecret).where(GraphNodeSecret.id == secret_id)) - row = result.scalar_one_or_none() - if not row or not row.encrypted_value: + payload_secrets[str(node_id)] = encrypted + logger.debug(f"[NodeSecrets] Stored encrypted a2a_auth_headers for node_id={node_id}") + + +def resolve_a2a_auth_headers( + payload_secrets: Dict[str, Any], + node_id: str | uuid.UUID, +) -> Optional[Dict[str, str]]: + """Decrypt and return a2a_auth_headers for *node_id* from *payload_secrets*. + + ``payload_secrets`` is ``definition_payload.get("node_secrets", {})``. + Returns ``None`` when no secret is stored for the given node. + """ + encrypted = payload_secrets.get(str(node_id)) + if not encrypted: return None try: - decrypted = decrypt_credentials(row.encrypted_value) - return {str(k): str(v) for k, v in decrypted.items()} if isinstance(decrypted, dict) else None - except Exception as e: - logger.warning(f"[NodeSecrets] Failed to decrypt secret {secret_id}: {e}") + result = decrypt_credentials(encrypted) + return {str(k): str(v) for k, v in result.items()} if isinstance(result, dict) else None + except Exception as exc: + logger.warning(f"[NodeSecrets] Failed to decrypt a2a_auth_headers for node_id={node_id}: {exc}") return None def prepare_node_data_for_save(node_data: Dict[str, Any]) -> tuple[Dict[str, Any], Optional[Dict[str, str]]]: """ If node has plain a2a_auth_headers, return (data_copy_without_plain_headers, headers_to_store). - Caller must: store the secret, then set data_copy["config"]["a2a_auth_headers"] = {"__secretRef": str(secret_id)}. + + Caller must call ``store_a2a_auth_headers(payload_secrets, node_id, headers_to_store)`` + after receiving the returned headers dict, then set:: + + data_copy["config"]["a2a_auth_headers"] = {"__secretRef": str(node_id)} """ data_copy = copy.deepcopy(node_data) config = (data_copy.get("config") or {}) if isinstance(data_copy.get("config"), dict) else {} @@ -90,27 +102,35 @@ def prepare_node_data_for_save(node_data: Dict[str, Any]) -> tuple[Dict[str, Any return data_copy, headers -async def hydrate_nodes_a2a_secrets(db: AsyncSession, nodes: List[GraphNode]) -> None: - """Resolve __secretRef in each node's data.config.a2a_auth_headers in-place (for execution only).""" +def hydrate_nodes_a2a_secrets(payload_secrets: Dict[str, Any], nodes: List[Any]) -> None: + """Resolve ``__secretRef`` in each node's a2a_auth_headers in-place (for execution only). + + Accepts either plain dicts (from definition_payload["nodes"]) or any + object with a ``data`` dict attribute. + + ``payload_secrets`` is ``definition_payload.get("node_secrets", {})``. + """ for node in nodes: - data = node.data or {} + # Support both plain dicts and data-holder objects. 
+ if isinstance(node, dict): + node_id = node.get("id", "") + data = node.get("data") or {} + else: + node_id = getattr(node, "id", "") + data = getattr(node, "data", None) or {} + config = data.get("config") or {} raw = config.get("a2a_auth_headers") if not isinstance(raw, dict) or REF_KEY not in raw: continue - ref = raw.get(REF_KEY) - if not ref: - continue - try: - secret_uuid = uuid.UUID(str(ref)) - except (ValueError, TypeError): - continue - resolved = await resolve_a2a_auth_headers(db, secret_uuid) - if resolved is not None: - config["a2a_auth_headers"] = resolved - if "config" not in data: - data["config"] = config - node.data = data + + resolved = resolve_a2a_auth_headers(payload_secrets, node_id) + config["a2a_auth_headers"] = resolved if resolved is not None else {} + + # Write back — handle both dict nodes and object nodes. + if "config" not in data: + data["config"] = config + if isinstance(node, dict): + node["data"] = data else: - config["a2a_auth_headers"] = {} node.data = data diff --git a/backend/app/core/graph/runtime_prompt_template.py b/backend/app/core/graph/runtime_prompt_template.py index b7733a99d..ce1971565 100644 --- a/backend/app/core/graph/runtime_prompt_template.py +++ b/backend/app/core/graph/runtime_prompt_template.py @@ -4,8 +4,6 @@ from collections.abc import Mapping from typing import Any -from app.models.graph import AgentGraph - _PLACEHOLDER_PATTERN = re.compile(r"\{([A-Za-z_][A-Za-z0-9_]*)\}") _PROMPT_CONFIG_KEYS = ("systemPrompt", "system_prompt", "prompt") @@ -35,7 +33,7 @@ def extract_runtime_template_variables(text: str | None) -> set[str]: def build_runtime_prompt_context( - graph: AgentGraph, + graph: Any, *, user_id: Any | None, thread_id: str | None, diff --git a/backend/app/core/observation/__init__.py b/backend/app/core/observation/__init__.py new file mode 100644 index 000000000..ed366f766 --- /dev/null +++ b/backend/app/core/observation/__init__.py @@ -0,0 +1,15 @@ +"""Observation tracing — Langfuse-aligned trace tree for in-product agent debugging.""" + +from app.core.observation.collector import ObservationCollector +from app.core.observation.model import Observation, Trace +from app.core.observation.otel.span_wrapper import ObservationSpan +from app.core.observation.types import ObservationLevel, ObservationType + +__all__ = [ + "Observation", + "ObservationCollector", + "ObservationLevel", + "ObservationSpan", + "ObservationType", + "Trace", +] diff --git a/backend/app/core/observation/collector.py b/backend/app/core/observation/collector.py new file mode 100644 index 000000000..21414423e --- /dev/null +++ b/backend/app/core/observation/collector.py @@ -0,0 +1,195 @@ +"""ObservationCollector — OTel-backed central API for observation tracing.""" + +from __future__ import annotations + +import asyncio +import uuid +from typing import Any, Callable, Coroutine + +import sqlalchemy as sa +from loguru import logger + +from app.core.observation.instrumentation.langchain_handler import ( + ObservationCallbackHandler, +) +from app.core.observation.model import Trace +from app.core.observation.otel.provider import ObservationTracerProvider +from app.core.observation.otel.span_wrapper import ObservationSpan +from app.core.observation.types import ObservationLevel, ObservationType +from app.utils.datetime import utc_now + + +class ObservationCollector: + def __init__( + self, + trace_id: uuid.UUID, + execution_id: uuid.UUID, + workspace_id: uuid.UUID, + db_session_factory: Callable[..., Coroutine[Any, Any, Any]], + broadcast_fn: Callable[..., 
Coroutine[Any, Any, None]] | None = None, + ) -> None: + loop = asyncio.get_running_loop() + self._provider = ObservationTracerProvider( + execution_id=execution_id, + trace_id=trace_id, + workspace_id=workspace_id, + db_session_factory=db_session_factory, + broadcast_fn=broadcast_fn, + event_loop=loop, + ) + self._tracer = self._provider.get_tracer() + self._trace_id = trace_id + self._db_session_factory = db_session_factory + + def start_span( + self, + obs_type: ObservationType, + name: str, + *, + parent: ObservationSpan | None = None, + input: Any = None, + metadata: dict | None = None, + level: ObservationLevel = ObservationLevel.DEFAULT, + ) -> ObservationSpan: + obs_id = uuid.uuid4() + + parent_ctx = None + if parent: + parent_ctx = parent.get_context() + + otel_span = self._tracer.start_span( + name, + context=parent_ctx, + attributes={ + "observation.id": str(obs_id), + "observation.type": obs_type.value, + "observation.level": level.value, + }, + ) + + obs = ObservationSpan(otel_span, obs_id, self._provider) + + if input is not None: + obs.set_input(input) + if metadata: + obs.set_metadata(metadata) + + return obs + + def start_agent(self, name: str, **kw: Any) -> ObservationSpan: + return self.start_span(ObservationType.AGENT, name, **kw) + + def child_span( + self, + parent: ObservationSpan, + obs_type: ObservationType, + name: str, + *, + input: Any = None, + **kw: Any, + ) -> ObservationSpan: + return self.start_span(obs_type, name, parent=parent, input=input, **kw) + + def record_generation( + self, + name: str, + *, + parent: ObservationSpan | None = None, + input: Any = None, + output: Any = None, + model: str | None = None, + usage_details: dict | None = None, + cost_details: dict | None = None, + metadata: dict | None = None, + level: ObservationLevel = ObservationLevel.DEFAULT, + ) -> ObservationSpan: + span = self.start_span( + ObservationType.GENERATION, + name, + parent=parent, + input=input, + metadata=metadata, + level=level, + ) + if output is not None: + span.set_output(output) + if model: + span.set_model(model) + if usage_details: + span.set_usage(usage_details) + if cost_details: + span.set_cost(cost_details) + span.end() + return span + + def record_tool( + self, + name: str, + *, + parent: ObservationSpan | None = None, + input: Any = None, + output: Any = None, + metadata: dict | None = None, + level: ObservationLevel = ObservationLevel.DEFAULT, + ) -> ObservationSpan: + span = self.start_span( + ObservationType.TOOL, + name, + parent=parent, + input=input, + metadata=metadata, + level=level, + ) + if output is not None: + span.set_output(output) + span.end() + return span + + def record_event( + self, + name: str, + *, + parent: ObservationSpan | None = None, + input: Any = None, + metadata: dict | None = None, + level: ObservationLevel = ObservationLevel.DEFAULT, + ) -> ObservationSpan: + span = self.start_span( + ObservationType.EVENT, + name, + parent=parent, + input=input, + metadata=metadata, + level=level, + ) + span.end() + return span + + def create_langchain_handler(self) -> ObservationCallbackHandler: + return ObservationCallbackHandler(self._tracer, self._provider) + + async def finalize(self, status: str = "complete") -> None: + agg = self._provider.get_persistence_aggregates() + final_status = "error" if agg["has_error"] else status + self._provider.broadcast_trace_complete(final_status, agg) + await self._provider.shutdown() + await self._update_trace_row(final_status, agg) + + async def _update_trace_row(self, status: str, agg: dict) -> 
None: + try: + session = await self._db_session_factory() + now = utc_now() + await session.execute( + sa.update(Trace) + .where(Trace.id == self._trace_id) + .values( + status=status, + end_time=now, + total_observations=agg["total_observations"], + total_tokens=agg["total_tokens"], + total_cost=agg["total_cost"], + ) + ) + await session.commit() + except Exception: + logger.opt(exception=True).warning("Failed to update Trace row") diff --git a/backend/app/core/observation/instrumentation/__init__.py b/backend/app/core/observation/instrumentation/__init__.py new file mode 100644 index 000000000..17ab3bc17 --- /dev/null +++ b/backend/app/core/observation/instrumentation/__init__.py @@ -0,0 +1,2 @@ +# backend/app/core/observation/instrumentation/__init__.py +"""Instrumentation helpers that bridge engine-specific data into ObservationCollector.""" diff --git a/backend/app/core/observation/instrumentation/cli_extractor.py b/backend/app/core/observation/instrumentation/cli_extractor.py new file mode 100644 index 000000000..61cde75fc --- /dev/null +++ b/backend/app/core/observation/instrumentation/cli_extractor.py @@ -0,0 +1,85 @@ +"""CLI message stream → observation extractor for CLI engines.""" + +from __future__ import annotations + +from app.core.agent.cli_backends.base import CLIMessage +from app.core.observation.collector import ObservationCollector +from app.core.observation.otel.span_wrapper import ObservationSpan +from app.core.observation.types import ObservationType + +FILE_TOOLS = frozenset( + { + "read_file", + "write_file", + "create_file", + "edit_file", + "Read", + "Write", + "Edit", + "Glob", + "Grep", + } +) + + +class CLIObservationExtractor: + def __init__(self, collector: ObservationCollector, root_span: ObservationSpan): + self._collector = collector + self._root = root_span + self._text_buffer: list[str] = [] + self._current_tool_span: ObservationSpan | None = None + self._current_usage: dict | None = None + + async def process_message(self, msg: CLIMessage) -> None: + match msg.type: + case "text": + self._text_buffer.append(msg.content or "") + + case "tool_use": + await self._flush_generation() + tool_name = msg.tool_name or msg.tool or msg.content or "tool" + tool_input = msg.tool_input or msg.input or {} + self._current_tool_span = self._collector.child_span( + self._root, + ObservationType.TOOL, + name=tool_name, + input={"arguments": tool_input}, + ) + if tool_name in FILE_TOOLS: + path = tool_input.get("path", tool_input.get("file_path", "")) + op = "read" if "read" in tool_name.lower() or tool_name in ("Read", "Glob", "Grep") else "write" + self._collector.record_event( + f"file:{op} {path}", + parent=self._current_tool_span, + metadata={"file.path": path, "file.operation": op}, + ) + + case "tool_result": + if self._current_tool_span: + self._current_tool_span.set_output({"result": msg.content}) + self._current_tool_span.end() + self._current_tool_span = None + + case "usage": + self._current_usage = msg.usage + + async def flush_pending(self) -> None: + await self._flush_generation() + + async def _flush_generation(self) -> None: + if not self._text_buffer: + return + text = "".join(self._text_buffer) + self._text_buffer.clear() + usage = self._current_usage or {} + self._current_usage = None + + self._collector.record_generation( + "cli-generation", + parent=self._root, + input=None, + output={"completion": text}, + model=None, + usage_details=usage if usage else None, + cost_details=None, + ) diff --git 
a/backend/app/core/observation/instrumentation/copilot_extractor.py b/backend/app/core/observation/instrumentation/copilot_extractor.py
new file mode 100644
index 000000000..d6fba0a41
--- /dev/null
+++ b/backend/app/core/observation/instrumentation/copilot_extractor.py
@@ -0,0 +1,40 @@
+"""Copilot stream → observation extractor."""
+
+from __future__ import annotations
+
+from app.core.observation.collector import ObservationCollector
+from app.core.observation.otel.span_wrapper import ObservationSpan
+
+
+class CopilotObservationExtractor:
+    def __init__(
+        self,
+        collector: ObservationCollector,
+        model_name: str,
+        parent_span: ObservationSpan | None = None,
+    ):
+        self._collector = collector
+        self._model_name = model_name
+        self._parent_span = parent_span
+        self._chunks: list[str] = []
+
+    def accumulate(self, content: str) -> None:
+        self._chunks.append(content)
+
+    async def flush(
+        self,
+        *,
+        prompt: str,
+        mode: str,
+        elapsed_ms: float,
+        usage_details: dict | None = None,
+    ) -> None:
+        self._collector.record_generation(
+            f"copilot:{self._model_name}",
+            parent=self._parent_span,
+            input={"prompt": prompt, "mode": mode},
+            output={"completion": "".join(self._chunks)},
+            model=self._model_name,
+            usage_details=usage_details,
+            cost_details=None,
+            metadata={"elapsed_ms": elapsed_ms},
+        )
diff --git a/backend/app/core/observation/instrumentation/file_tracker.py b/backend/app/core/observation/instrumentation/file_tracker.py
new file mode 100644
index 000000000..8d524c654
--- /dev/null
+++ b/backend/app/core/observation/instrumentation/file_tracker.py
@@ -0,0 +1,40 @@
+"""File operation → EVENT observation tracker."""
+
+from __future__ import annotations
+
+from app.core.observation.collector import ObservationCollector
+from app.core.observation.otel.span_wrapper import ObservationSpan
+
+
+class FileOperationTracker:
+    def __init__(
+        self,
+        collector: ObservationCollector,
+        parent_span: ObservationSpan | None = None,
+    ):
+        self._collector = collector
+        self._parent_span = parent_span
+
+    async def track_write(self, path: str, content: bytes | str) -> None:
+        size = self._byte_len(content)
+        if isinstance(content, str):
+            preview = content[:200]
+        else:
+            preview = content[:200].decode(errors="replace")
+        await self._track(path, "write", size, content_preview=preview)
+
+    async def track_read(self, path: str, content: bytes | str) -> None:
+        await self._track(path, "read", self._byte_len(content))
+
+    async def _track(self, path: str, operation: str, size: int, **extra: str | None) -> None:
+        meta: dict = {"file.path": path, "file.operation": operation, "file.size_bytes": size}
+        meta.update({k: v for k, v in extra.items() if v is not None})
+        self._collector.record_event(
+            f"file:{operation} {path}",
+            parent=self._parent_span,
+            metadata=meta,
+        )
+
+    @staticmethod
+    def _byte_len(content: bytes | str) -> int:
+        return len(content.encode() if isinstance(content, str) else content)
diff --git a/backend/app/core/observation/instrumentation/langchain_handler.py b/backend/app/core/observation/instrumentation/langchain_handler.py
new file mode 100644
index 000000000..5665c7903
--- /dev/null
+++ b/backend/app/core/observation/instrumentation/langchain_handler.py
@@ -0,0 +1,614 @@
+"""LangChain async callback handler — maps all 18 hooks to OTel observation spans."""
+
+from __future__ import annotations
+
+import json
+import time
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Any, Sequence
+
+from langchain_core.callbacks import 
AsyncCallbackHandler +from langchain_core.messages import BaseMessage +from loguru import logger +from opentelemetry.trace import Tracer + +from app.core.observation.instrumentation.langchain_utils import ( + _classify_chain, + extract_model_name, +) +from app.core.observation.otel.provider import ObservationTracerProvider +from app.core.observation.otel.span_wrapper import ObservationSpan +from app.core.observation.types import ObservationLevel, ObservationType +from app.utils.message_serializer import serialize_message +from app.utils.token_usage import extract_usage_from_llm_result + + +def _safe_json(obj: Any) -> Any: + if obj is None or isinstance(obj, (str, int, float, bool)): + return obj + if isinstance(obj, dict): + return {k: _safe_json(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + return [_safe_json(v) for v in obj] + if hasattr(obj, "model_dump"): + return _safe_json(obj.model_dump()) + if hasattr(obj, "dict"): + return _safe_json(obj.dict()) + try: + json.dumps(obj) + return obj + except (TypeError, ValueError): + return str(obj) + + +@dataclass +class RunState: + parent_run_id: uuid.UUID | None + root_run_id: uuid.UUID + + +@dataclass +class RootRunState: + run_ids: set[uuid.UUID] = field(default_factory=set) + + +_FLUSH_INTERVAL_S = 0.1 + +_MODEL_PARAMETER_KEYS = ( + "temperature", + "max_tokens", + "max_completion_tokens", + "top_p", + "frequency_penalty", + "presence_penalty", + "request_timeout", + "stop", + "stop_sequences", +) + + +def _parse_model_parameters(kwargs: dict[str, Any]) -> dict[str, Any] | None: + inv = kwargs.get("invocation_params") + if not inv or not isinstance(inv, dict): + return None + params = {k: inv[k] for k in _MODEL_PARAMETER_KEYS if inv.get(k) is not None} + return params or None + + +class _TokenBuffer: + __slots__ = ("_parts", "_accumulated", "_last_flush", "_dirty") + + def __init__(self) -> None: + self._parts: list[str] = [] + self._accumulated = "" + self._last_flush = time.monotonic() + self._dirty = False + + def append(self, token: str) -> str | None: + self._parts.append(token) + self._dirty = True + now = time.monotonic() + if now - self._last_flush >= _FLUSH_INTERVAL_S: + self._last_flush = now + self._accumulated = self._accumulated + "".join(self._parts) + self._parts.clear() + return self._accumulated + return None + + def drain(self) -> str | None: + if not self._dirty: + return None + if self._parts: + self._accumulated = self._accumulated + "".join(self._parts) + self._parts.clear() + return self._accumulated + + +class ObservationCallbackHandler(AsyncCallbackHandler): + def __init__(self, tracer: Tracer, provider: ObservationTracerProvider) -> None: + self._tracer = tracer + self._provider = provider + self._runs: dict[uuid.UUID, ObservationSpan] = {} + self._run_states: dict[uuid.UUID, RunState] = {} + self._root_run_states: dict[uuid.UUID, RootRunState] = {} + self._completion_start_memo: set[uuid.UUID] = set() + self._token_buffers: dict[uuid.UUID, _TokenBuffer] = {} + self._prompt_to_parent: dict[uuid.UUID, Any] = {} + + # --- run tree --- + + def _track_run(self, run_id: uuid.UUID, parent_run_id: uuid.UUID | None) -> None: + if run_id in self._run_states: + return + if parent_run_id is None or parent_run_id not in self._run_states: + root = run_id + self._root_run_states[root] = RootRunState() + else: + root = self._run_states[parent_run_id].root_run_id + self._run_states[run_id] = RunState(parent_run_id, root) + self._root_run_states[root].run_ids.add(run_id) + + def _is_root(self, run_id: 
uuid.UUID) -> bool:
+        state = self._run_states.get(run_id)
+        return state is not None and state.root_run_id == run_id
+
+    # --- OTel context ---
+
+    def _start_obs_span(
+        self,
+        run_id: uuid.UUID,
+        name: str,
+        obs_type: ObservationType,
+        parent_run_id: uuid.UUID | None = None,
+    ) -> ObservationSpan:
+        obs_id = uuid.uuid4()
+
+        parent_ctx = None
+        if parent_run_id and parent_run_id in self._runs:
+            parent_span = self._runs[parent_run_id]
+            parent_ctx = parent_span.get_context()
+
+        otel_span = self._tracer.start_span(
+            name,
+            context=parent_ctx,
+            attributes={
+                "observation.id": str(obs_id),
+                "observation.type": obs_type.value,
+                "observation.level": ObservationLevel.DEFAULT.value,
+            },
+        )
+        obs = ObservationSpan(otel_span, obs_id, self._provider)
+        self._runs[run_id] = obs
+        return obs
+
+    def _detach_span(self, run_id: uuid.UUID) -> ObservationSpan | None:
+        return self._runs.pop(run_id, None)
+
+    def _reset(self, root_run_id: uuid.UUID) -> None:
+        state = self._root_run_states.pop(root_run_id, None)
+        if state:
+            for rid in state.run_ids:
+                self._run_states.pop(rid, None)
+
+    # --- chain hooks ---
+
+    async def on_chain_start(
+        self,
+        serialized: dict[str, Any] | None,
+        inputs: dict[str, Any],
+        *,
+        run_id: uuid.UUID,
+        parent_run_id: uuid.UUID | None = None,
+        tags: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> None:
+        try:
+            self._track_run(run_id, parent_run_id)
+            name = (serialized or {}).get("name", "") or kwargs.get("name", "chain")
+            obs_type = _classify_chain(name, serialized or {})
+            obs = self._start_obs_span(run_id, name, obs_type, parent_run_id)
+            obs.set_input(_safe_json(inputs))
+            if metadata:
+                obs.set_metadata(metadata)
+            # Guard the lookup: metadata may be None here.
+            prompt = metadata.get("langfuse_prompt") if metadata else None
+            if prompt:
+                self._prompt_to_parent[run_id] = prompt
+        except Exception:
+            logger.opt(exception=True).debug("on_chain_start failed")
+
+    async def on_chain_end(
+        self,
+        outputs: dict[str, Any],
+        *,
+        run_id: uuid.UUID,
+        **kwargs: Any,
+    ) -> None:
+        try:
+            obs = self._detach_span(run_id)
+            if obs:
+                obs.set_output(_safe_json(outputs))
+                obs.end()
+            if self._is_root(run_id):
+                self._reset(run_id)
+        except Exception:
+            logger.opt(exception=True).debug("on_chain_end failed")
+
+    async def on_chain_error(
+        self,
+        error: BaseException,
+        *,
+        run_id: uuid.UUID,
+        **kwargs: Any,
+    ) -> None:
+        try:
+            obs = self._detach_span(run_id)
+            if obs:
+                obs.set_level(ObservationLevel.ERROR)
+                obs.set_status_message(str(error))
+                obs.end()
+            if self._is_root(run_id):
+                self._reset(run_id)
+        except Exception:
+            logger.opt(exception=True).debug("on_chain_error failed")
+
+    # --- LLM hooks ---
+
+    def _apply_llm_attributes(
+        self,
+        obs: ObservationSpan,
+        run_id: uuid.UUID,
+        parent_run_id: uuid.UUID | None,
+        *,
+        serialized: dict[str, Any] | None,
+        metadata: dict[str, Any] | None,
+        kwargs: dict[str, Any],
+    ) -> None:
+        model = extract_model_name(
+            metadata=metadata,
+            serialized=serialized,
+            kwargs=kwargs,
+            response=None,
+        )
+        if model:
+            obs.set_model(model)
+
+        model_params = _parse_model_parameters(kwargs)
+        if model_params:
+            obs.set_model_parameters(model_params)
+
+        inv_params = kwargs.get("invocation_params")
+        if inv_params and isinstance(inv_params, dict):
+            tools = inv_params.get("tools")
+            if tools and isinstance(tools, list):
+                obs.set_tool_definitions(_safe_json(tools))
+
+        if metadata:
+            obs.set_metadata(metadata)
+
+        self._maybe_link_prompt(run_id, parent_run_id, obs)
+
+    async def on_chat_model_start(
+        self,
+        serialized: dict[str, Any] | 
None, + messages: list[list[BaseMessage]], + *, + run_id: uuid.UUID, + parent_run_id: uuid.UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + **kwargs: Any, + ) -> None: + try: + self._track_run(run_id, parent_run_id) + name = (serialized or {}).get("name", "") or kwargs.get("name", "chat_model") + + input_msgs: list[dict[str, Any]] = [] + for msg_list in messages: + input_msgs.extend(serialize_message(m) for m in msg_list) + + obs = self._start_obs_span(run_id, name, ObservationType.GENERATION, parent_run_id) + obs.set_input(input_msgs) + + self._apply_llm_attributes( + obs, + run_id, + parent_run_id, + serialized=serialized, + metadata=metadata, + kwargs=kwargs, + ) + except Exception: + logger.opt(exception=True).debug("on_chat_model_start failed") + + async def on_llm_start( + self, + serialized: dict[str, Any] | None, + prompts: list[str], + *, + run_id: uuid.UUID, + parent_run_id: uuid.UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + **kwargs: Any, + ) -> None: + try: + self._track_run(run_id, parent_run_id) + name = (serialized or {}).get("name", "") or kwargs.get("name", "llm") + obs = self._start_obs_span(run_id, name, ObservationType.GENERATION, parent_run_id) + obs.set_input(prompts[0] if len(prompts) == 1 else prompts) + + self._apply_llm_attributes( + obs, + run_id, + parent_run_id, + serialized=serialized or {}, + metadata=metadata, + kwargs=kwargs, + ) + except Exception: + logger.opt(exception=True).debug("on_llm_start failed") + + async def on_llm_end( + self, + response: Any, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + buf = self._token_buffers.pop(run_id, None) + obs = self._detach_span(run_id) + if not obs: + return + + if buf: + final_text = buf.drain() + if final_text is not None: + obs.flush_streaming_text(final_text) + + output: dict[str, Any] = {} + if hasattr(response, "generations") and response.generations: + gen_list = response.generations[0] + if gen_list: + gen = gen_list[0] + if hasattr(gen, "message"): + output = serialize_message(gen.message) + msg = gen.message + tool_calls = getattr(msg, "tool_calls", None) + if tool_calls: + obs.set_tool_calls(_safe_json(tool_calls)) + elif hasattr(gen, "text"): + output = {"completion": gen.text} + + usage = extract_usage_from_llm_result(response) + if usage: + obs.set_usage(usage) + + if hasattr(response, "llm_output") and response.llm_output: + model_from_response = response.llm_output.get("model_name") + if model_from_response: + obs.set_model(model_from_response) + + obs.set_output(output) + obs.end() + self._completion_start_memo.discard(run_id) + except Exception: + logger.opt(exception=True).debug("on_llm_end failed") + + async def on_llm_error( + self, + error: BaseException, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + self._token_buffers.pop(run_id, None) + obs = self._detach_span(run_id) + if obs: + obs.set_level(ObservationLevel.ERROR) + obs.set_status_message(str(error)) + obs.end() + self._completion_start_memo.discard(run_id) + except Exception: + logger.opt(exception=True).debug("on_llm_error failed") + + async def on_llm_new_token( + self, + token: str, + *, + run_id: uuid.UUID, + chunk: Any | None = None, + **kwargs: Any, + ) -> None: + try: + obs = self._runs.get(run_id) + if not obs: + return + if run_id not in self._completion_start_memo: + self._completion_start_memo.add(run_id) + obs.set_completion_start_time(datetime.now(tz=timezone.utc)) + buf = 
self._token_buffers.get(run_id) + if buf is None: + buf = _TokenBuffer() + self._token_buffers[run_id] = buf + text = buf.append(token) + if text is not None: + obs.flush_streaming_text(text) + except Exception: + logger.opt(exception=True).debug("on_llm_new_token failed") + + # --- tool hooks --- + + async def on_tool_start( + self, + serialized: dict[str, Any] | None, + input_str: str, + *, + run_id: uuid.UUID, + parent_run_id: uuid.UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + **kwargs: Any, + ) -> None: + try: + self._track_run(run_id, parent_run_id) + name = (serialized or {}).get("name", "") or kwargs.get("name", "tool") + obs = self._start_obs_span(run_id, name, ObservationType.TOOL, parent_run_id) + obs.set_input({"arguments": input_str}) + if metadata: + obs.set_metadata(metadata) + except Exception: + logger.opt(exception=True).debug("on_tool_start failed") + + async def on_tool_end( + self, + output: str, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._detach_span(run_id) + if obs: + obs.set_output({"result": output}) + obs.end() + except Exception: + logger.opt(exception=True).debug("on_tool_end failed") + + async def on_tool_error( + self, + error: BaseException, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._detach_span(run_id) + if obs: + obs.set_level(ObservationLevel.ERROR) + obs.set_status_message(str(error)) + obs.end() + except Exception: + logger.opt(exception=True).debug("on_tool_error failed") + + # --- retriever hooks --- + + async def on_retriever_start( + self, + serialized: dict[str, Any] | None, + query: str, + *, + run_id: uuid.UUID, + parent_run_id: uuid.UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + **kwargs: Any, + ) -> None: + try: + self._track_run(run_id, parent_run_id) + name = (serialized or {}).get("name", "") or kwargs.get("name", "retriever") + obs = self._start_obs_span(run_id, name, ObservationType.RETRIEVER, parent_run_id) + obs.set_input({"query": query}) + if metadata: + obs.set_metadata(metadata) + except Exception: + logger.opt(exception=True).debug("on_retriever_start failed") + + async def on_retriever_end( + self, + documents: Sequence[Any], + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._detach_span(run_id) + if obs: + docs_out = [_safe_json(d) for d in documents] + obs.set_output({"documents": docs_out}) + obs.end() + except Exception: + logger.opt(exception=True).debug("on_retriever_end failed") + + async def on_retriever_error( + self, + error: BaseException, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._detach_span(run_id) + if obs: + obs.set_level(ObservationLevel.ERROR) + obs.set_status_message(str(error)) + obs.end() + except Exception: + logger.opt(exception=True).debug("on_retriever_error failed") + + # --- agent hooks --- + + async def on_agent_action( + self, + action: Any, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._runs.get(run_id) + if obs: + obs.set_observation_type(ObservationType.AGENT) + log = _safe_json(getattr(action, "log", str(action))) + obs.add_intermediate_update({"type": ObservationType.AGENT.value, "action_log": log}) + except Exception: + logger.opt(exception=True).debug("on_agent_action failed") + + async def on_agent_finish( + self, + finish: Any, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._runs.get(run_id) + if obs: + 
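+                # Record the agent's final output on the still-open span; the
+                # span itself is closed by the owning chain/LLM end hook.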
return_values = _safe_json(getattr(finish, "return_values", str(finish))) + obs.set_output(return_values) + except Exception: + logger.opt(exception=True).debug("on_agent_finish failed") + + # --- misc hooks --- + + async def on_retry( + self, + retry_state: Any, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + try: + obs = self._runs.get(run_id) + if obs: + obs.add_event( + "retry", + { + "attempt": str(getattr(retry_state, "attempt_number", "?")), + "error": str(getattr(retry_state, "outcome", "")), + }, + ) + except Exception: + logger.opt(exception=True).debug("on_retry failed") + + async def on_text( + self, + text: str, + *, + run_id: uuid.UUID, + **kwargs: Any, + ) -> None: + pass # Ignored — info covered by other hooks + + # --- prompt linkage helper --- + + def _maybe_link_prompt( + self, + run_id: uuid.UUID, + parent_run_id: uuid.UUID | None, + obs: ObservationSpan, + ) -> None: + current = parent_run_id + while current: + prompt = self._prompt_to_parent.pop(current, None) + if prompt: + name = getattr(prompt, "name", str(prompt)) + version = str(getattr(prompt, "version", "")) + obs.set_prompt(name, version or None) + return + state = self._run_states.get(current) + current = state.parent_run_id if state else None diff --git a/backend/app/core/observation/instrumentation/langchain_utils.py b/backend/app/core/observation/instrumentation/langchain_utils.py new file mode 100644 index 000000000..108ab7b8d --- /dev/null +++ b/backend/app/core/observation/instrumentation/langchain_utils.py @@ -0,0 +1,94 @@ +"""LangChain callback helper utilities — message conversion, usage normalization, model extraction.""" + +from __future__ import annotations + +from typing import Any + +from langchain_core.messages import BaseMessage + +from app.core.observation.types import ObservationType + +MESSAGE_ROLE_MAP: dict[str, str | None] = { + "HumanMessage": "user", + "AIMessage": "assistant", + "SystemMessage": "system", + "ToolMessage": "tool", + "FunctionMessage": "function", + "ChatMessage": None, +} + +USAGE_KEY_MAP: list[tuple[str, str, str | None]] = [ + ("prompt_tokens", "completion_tokens", "total_tokens"), + ("input_tokens", "output_tokens", None), + ("promptTokenCount", "candidatesTokenCount", "totalTokenCount"), + ("inputTokens", "outputTokens", "totalTokens"), +] + + +def convert_message_to_dict(message: BaseMessage) -> dict: + """Convert a LangChain BaseMessage to a plain dict with role/content/tool_calls.""" + role = MESSAGE_ROLE_MAP.get(type(message).__name__, "unknown") + if role is None: + role = getattr(message, "role", "unknown") + result: dict[str, Any] = {"role": role, "content": message.content} + if hasattr(message, "tool_calls") and message.tool_calls: + result["tool_calls"] = message.tool_calls + if hasattr(message, "tool_call_id") and message.tool_call_id: + result["tool_call_id"] = message.tool_call_id + if message.additional_kwargs: + result.update(message.additional_kwargs) + return result + + +def normalize_usage(raw: dict | None) -> dict[str, int]: + """Normalize token usage across provider formats to {input, output, total}.""" + if not raw: + return {} + for input_key, output_key, total_key in USAGE_KEY_MAP: + if input_key in raw: + inp = int(raw[input_key]) + out = int(raw.get(output_key, 0)) + total = int(raw[total_key]) if total_key and total_key in raw else inp + out + return {"input": inp, "output": out, "total": total} + return {} + + +def extract_model_name( + *, + metadata: dict | None, + serialized: dict | None, + kwargs: dict, + response: Any | None, 
+) -> str | None: + """Multi-source model name extraction: metadata -> serialized -> invocation_params -> response.""" + if metadata and metadata.get("ls_model_name"): + return str(metadata["ls_model_name"]) + + ser_kwargs = (serialized or {}).get("kwargs", {}) + if ser_kwargs.get("model_name"): + return str(ser_kwargs["model_name"]) + if ser_kwargs.get("model"): + return str(ser_kwargs["model"]) + + inv_params = kwargs.get("invocation_params", {}) + if inv_params.get("model_name"): + return str(inv_params["model_name"]) + if inv_params.get("model"): + return str(inv_params["model"]) + + if response and hasattr(response, "llm_output") and response.llm_output: + if response.llm_output.get("model_name"): + return str(response.llm_output["model_name"]) + + return None + + +def _classify_chain(name: str, serialized: dict) -> ObservationType: + """Determine if a chain is actually an AGENT based on name patterns and serialized id path.""" + if name and (name.startswith("worker:") or "SubAgent" in name or "CompiledSubAgent" in name): + return ObservationType.AGENT + if serialized: + path = serialized.get("id", []) + if any("agent" in seg.lower() for seg in path if isinstance(seg, str)): + return ObservationType.AGENT + return ObservationType.CHAIN diff --git a/backend/app/core/observation/model.py b/backend/app/core/observation/model.py new file mode 100644 index 000000000..5ec04cbb5 --- /dev/null +++ b/backend/app/core/observation/model.py @@ -0,0 +1,100 @@ +# backend/app/core/observation/model.py +"""Trace and Observation persistence models — Langfuse-aligned schema.""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from decimal import Decimal + +from sqlalchemy import ( + ARRAY, + Boolean, + DateTime, + ForeignKey, + Integer, + Numeric, + String, + Text, + func, +) +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column + +from app.core.database import Base + + +class Trace(Base): + __tablename__ = "traces" + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True) + name: Mapped[str] = mapped_column(String(255), nullable=False) + workspace_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), nullable=False) + + start_time: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + end_time: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + status: Mapped[str] = mapped_column(String(20), nullable=False, server_default="running") + + input: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + output: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + meta: Mapped[dict | None] = mapped_column("metadata", JSONB, nullable=True) + + environment: Mapped[str] = mapped_column(String(50), server_default="debug") + tags: Mapped[list[str] | None] = mapped_column(ARRAY(String), server_default="{}") + release: Mapped[str | None] = mapped_column(String(255), nullable=True) + version: Mapped[str | None] = mapped_column(String(100), nullable=True) + session_id: Mapped[str | None] = mapped_column(String(255), nullable=True) + bookmarked: Mapped[bool] = mapped_column(Boolean, server_default="false") + public: Mapped[bool] = mapped_column(Boolean, server_default="false") + + total_observations: Mapped[int] = mapped_column(Integer, server_default="0") + total_tokens: Mapped[int] = mapped_column(Integer, server_default="0") + total_cost: Mapped[Decimal | None] = mapped_column(Numeric(12, 6), nullable=True) + duration_ms: Mapped[int | 
None] = mapped_column(Integer, nullable=True) + + execution_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), nullable=False) + agent_version_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), nullable=False) + user_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), nullable=False) + + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now(), nullable=False) + + +class Observation(Base): + __tablename__ = "observations" + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True) + trace_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("traces.id", ondelete="CASCADE"), + nullable=False, + ) + parent_observation_id: Mapped[uuid.UUID | None] = mapped_column(UUID(as_uuid=True), nullable=True) + + type: Mapped[str] = mapped_column(String(20), nullable=False) + name: Mapped[str] = mapped_column(String(500), nullable=False) + level: Mapped[str] = mapped_column(String(10), nullable=False, server_default="DEFAULT") + status_message: Mapped[str | None] = mapped_column(Text, nullable=True) + environment: Mapped[str] = mapped_column(String(50), server_default="debug") + + start_time: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + end_time: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + completion_start_time: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True) + + input: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + output: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + meta: Mapped[dict | None] = mapped_column("metadata", JSONB, nullable=True) + + model: Mapped[str | None] = mapped_column(String(100), nullable=True) + model_parameters: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + usage_details: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + cost_details: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + prompt_name: Mapped[str | None] = mapped_column(String(255), nullable=True) + prompt_version: Mapped[int | None] = mapped_column(Integer, nullable=True) + + tool_definitions: Mapped[dict | None] = mapped_column(JSONB, nullable=True) + tool_calls: Mapped[list | None] = mapped_column(JSONB, nullable=True) + + execution_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), nullable=False) + workspace_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), nullable=False) + + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now(), nullable=False) diff --git a/backend/app/core/observation/otel/__init__.py b/backend/app/core/observation/otel/__init__.py new file mode 100644 index 000000000..d4a5695fb --- /dev/null +++ b/backend/app/core/observation/otel/__init__.py @@ -0,0 +1 @@ +"""OpenTelemetry-backed observation pipeline.""" diff --git a/backend/app/core/observation/otel/broadcast_processor.py b/backend/app/core/observation/otel/broadcast_processor.py new file mode 100644 index 000000000..8e6922634 --- /dev/null +++ b/backend/app/core/observation/otel/broadcast_processor.py @@ -0,0 +1,134 @@ +"""BroadcastProcessor — instant WebSocket relay via LiveSpanProcessor.""" + +from __future__ import annotations + +import asyncio +import uuid +from datetime import datetime, timezone +from typing import Any, Callable, Coroutine + +from loguru import logger + +from app.core.observation.otel.processor_base import ( + LiveSpanProcessor, + build_cost, + build_usage, + ns_to_iso, + parse_json_attr, +) +from 
app.core.observation.otel.span_wrapper import ObservationSpan +from app.core.observation.types import ObservationLevel, ObservationType + + +class BroadcastProcessor(LiveSpanProcessor): + def __init__( + self, + execution_id: uuid.UUID, + trace_id: uuid.UUID, + broadcast_fn: Callable[..., Coroutine[Any, Any, None]] | None, + event_loop: asyncio.AbstractEventLoop, + ) -> None: + self._execution_id = execution_id + self._trace_id = trace_id + self._broadcast_fn = broadcast_fn + self._loop = event_loop + self._otel_span_id_to_observation_id: dict[int, str] = {} + + def _resolve_parent_obs_id(self, span: Any) -> str | None: + if span.parent: + return self._otel_span_id_to_observation_id.get(span.parent.span_id) + return None + + def _build_observation(self, span: Any, *, include_end: bool = False) -> dict: + attrs = span.attributes or {} + obs: dict = { + "id": str(attrs.get("observation.id", "")), + "trace_id": str(self._trace_id), + "parent_observation_id": self._resolve_parent_obs_id(span), + "type": attrs.get("observation.type", ObservationType.SPAN.value), + "name": span.name, + "level": attrs.get("observation.level", ObservationLevel.DEFAULT.value), + "status_message": attrs.get("observation.status_message"), + "start_time": ns_to_iso(span.start_time), + "end_time": ns_to_iso(span.end_time) if include_end else None, + "input": parse_json_attr(attrs.get("observation.input")), + "output": parse_json_attr(attrs.get("observation.output")) if include_end else None, + "metadata": parse_json_attr(attrs.get("observation.metadata")), + "model": attrs.get("llm.model"), + "model_parameters": parse_json_attr(attrs.get("llm.parameters")), + "completion_start_time": attrs.get("llm.completion_start_time"), + "prompt_name": attrs.get("llm.prompt.name"), + "prompt_version": attrs.get("llm.prompt.version"), + "usage_details": build_usage(attrs) if include_end else None, + "cost_details": build_cost(attrs) if include_end else None, + } + return obs + + def on_start(self, span: Any, parent_context: Any = None) -> None: + attrs = span.attributes or {} + obs_id = str(attrs.get("observation.id", "")) + if obs_id and hasattr(span, "context"): + self._otel_span_id_to_observation_id[span.context.span_id] = obs_id + self._emit("span_open", self._build_observation(span)) + + def on_end(self, span: Any) -> None: + self._emit("span_close", self._build_observation(span, include_end=True)) + + if hasattr(span, "context") and span.context: + self._otel_span_id_to_observation_id.pop(span.context.span_id, None) + + def on_event(self, span: ObservationSpan, event_name: str, attributes: dict) -> None: + parent_obs_id: str | None = None + parent_span_id = span.get_parent_span_id() + if parent_span_id is not None: + parent_obs_id = self._otel_span_id_to_observation_id.get(parent_span_id) + self._emit( + event_name, + { + "id": str(span.observation_id), + "trace_id": str(self._trace_id), + "parent_observation_id": parent_obs_id, + }, + data=dict(attributes), + ) + + def emit_trace_complete(self, status: str, trace_id: str, aggregates: dict) -> None: + self._emit( + "trace_complete", + observation=None, + data={"status": status, "trace_id": trace_id, **aggregates}, + ) + + def _emit( + self, + event: str, + observation: dict | None, + data: dict | None = None, + ) -> None: + if not self._broadcast_fn: + return + message = { + "type": "observation", + "execution_id": str(self._execution_id), + "event": event, + "observation": observation, + "data": data or {}, + "timestamp": datetime.now(tz=timezone.utc).isoformat(), + } + try: 
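+            # _emit may run on an OTel worker thread, so schedule the coroutine
+            # onto the app event loop thread-safely instead of awaiting it here.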
+ future = asyncio.run_coroutine_threadsafe(self._broadcast_fn(self._execution_id, message), self._loop) + future.add_done_callback(self._log_if_failed) + except Exception: + logger.opt(exception=True).debug("broadcast schedule failed") + + @staticmethod + def _log_if_failed(future: Any) -> None: + exc = future.exception() + if exc: + logger.warning(f"broadcast failed: {exc}") + + def shutdown(self) -> None: + pass + + def force_flush(self, timeout_millis: int = 30000) -> bool: + return True diff --git a/backend/app/core/observation/otel/persistence_processor.py b/backend/app/core/observation/otel/persistence_processor.py new file mode 100644 index 000000000..575adcc9e --- /dev/null +++ b/backend/app/core/observation/otel/persistence_processor.py @@ -0,0 +1,245 @@ +"""PersistenceProcessor -- deferred-INSERT SpanProcessor writing Observation rows to PG.""" + +from __future__ import annotations + +import asyncio +import uuid +from collections.abc import Mapping +from datetime import datetime +from typing import Any, Callable, Coroutine + +from loguru import logger +from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor + +from app.core.observation.model import Observation +from app.core.observation.otel.processor_base import ( + build_cost, + build_usage, + ns_to_datetime, + parse_json_attr, +) +from app.core.observation.types import ObservationLevel, ObservationType + +_SENTINEL = object() + + +class PersistenceProcessor(SpanProcessor): + """Batched async writer that converts finished OTel spans into Observation rows. + + Designed to bridge OTel's synchronous SpanProcessor callbacks to async + SQLAlchemy persistence via an asyncio.Queue + drain loop running on the + caller's event loop. + """ + + def __init__( + self, + execution_id: uuid.UUID, + trace_id: uuid.UUID, + workspace_id: uuid.UUID, + db_session_factory: Callable[..., Coroutine[Any, Any, Any]], + event_loop: asyncio.AbstractEventLoop, + *, + max_batch: int = 10, + max_wait_ms: int = 300, + max_buffer_size: int = 1000, + ) -> None: + self._execution_id = execution_id + self._trace_id = trace_id + self._workspace_id = workspace_id + self._db_session_factory = db_session_factory + self._loop = event_loop + self._max_batch = max_batch + self._max_wait_ms = max_wait_ms + self._max_buffer_size = max_buffer_size + + self._queue: asyncio.Queue[Any] = asyncio.Queue(maxsize=max_buffer_size) + self._otel_span_id_to_observation_id: dict[int, uuid.UUID] = {} + + # Aggregation state + self._total_tokens = 0 + self._total_cost = 0.0 + self._observation_count = 0 + self._has_error = False + + # Start drain loop on the event loop + self._drain_future = asyncio.run_coroutine_threadsafe(self._drain_loop(), self._loop) + + # ---- SpanProcessor interface ---- + + def on_start(self, span: ReadableSpan, parent_context: Any = None) -> None: # type: ignore[override] + """Stash the OTel span_id -> observation_id mapping for parent resolution.""" + obs_id_str = span.attributes.get("observation.id") # type: ignore[union-attr] + if obs_id_str: + self._otel_span_id_to_observation_id[span.context.span_id] = uuid.UUID( # type: ignore[union-attr] + str(obs_id_str) + ) + + def on_end(self, span: ReadableSpan) -> None: + """Convert a finished span to an Observation and enqueue for persistence.""" + attrs = span.attributes or {} + obs_id_str = attrs.get("observation.id") + if not obs_id_str: + return + + obs_id = uuid.UUID(str(obs_id_str)) + + parent_obs_id: uuid.UUID | None = None + if span.parent: + parent_obs_id =
self._otel_span_id_to_observation_id.get(span.parent.span_id) + + obs = Observation( + id=obs_id, + trace_id=self._trace_id, + execution_id=self._execution_id, + workspace_id=self._workspace_id, + parent_observation_id=parent_obs_id, + type=str(attrs.get("observation.type", ObservationType.SPAN.value)), + name=span.name, + level=str(attrs.get("observation.level", ObservationLevel.DEFAULT.value)), + status_message=attrs.get("observation.status_message"), # type: ignore[arg-type] + start_time=ns_to_datetime(span.start_time), + end_time=ns_to_datetime(span.end_time), + input=parse_json_attr(attrs.get("observation.input")), + output=parse_json_attr(attrs.get("observation.output")), + meta=parse_json_attr(attrs.get("observation.metadata")), + model=attrs.get("llm.model"), # type: ignore[arg-type] + model_parameters=parse_json_attr(attrs.get("llm.parameters")), + usage_details=build_usage(attrs), + cost_details=build_cost(attrs), + completion_start_time=self._parse_iso_attr(attrs, "llm.completion_start_time"), + prompt_name=attrs.get("llm.prompt.name"), # type: ignore[arg-type] + prompt_version=self._safe_int(attrs.get("llm.prompt.version")), + tool_calls=parse_json_attr(attrs.get("tool.calls")), + tool_definitions=parse_json_attr(attrs.get("tool.definitions")), + ) + + self._loop.call_soon_threadsafe(self._queue.put_nowait, obs) + + usage_total = attrs.get("llm.usage.total", 0) + if usage_total: + self._total_tokens += int(usage_total) # type: ignore[arg-type] + cost_total = attrs.get("llm.cost.total", 0.0) + if cost_total: + self._total_cost += float(cost_total) # type: ignore[arg-type] + self._observation_count += 1 + if str(attrs.get("observation.level")) == ObservationLevel.ERROR.value: + self._has_error = True + + for event in span.events: + if event.name.startswith("stream."): + continue + event_obs = Observation( + id=uuid.uuid4(), + trace_id=self._trace_id, + execution_id=self._execution_id, + workspace_id=self._workspace_id, + parent_observation_id=obs_id, + type=ObservationType.EVENT.value, + name=event.name, + level=ObservationLevel.DEFAULT.value, + start_time=ns_to_datetime(event.timestamp), + meta=dict(event.attributes) if event.attributes else None, + ) + self._loop.call_soon_threadsafe(self._queue.put_nowait, event_obs) + + # Prune span-id map entry — no longer needed after on_end + if hasattr(span, "context") and span.context: + self._otel_span_id_to_observation_id.pop(span.context.span_id, None) + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """No-op; drain loop handles flushing on its own schedule.""" + return True + + # ---- Public API ---- + + def get_aggregates(self) -> dict: + """Return accumulated token/cost/observation counts.""" + return { + "total_tokens": self._total_tokens, + "total_cost": self._total_cost, + "total_observations": self._observation_count, + "has_error": self._has_error, + } + + def shutdown(self, timeout_millis: int = 10000) -> None: + """Signal the drain loop to exit and wait for completion. + + Sync to satisfy the SpanProcessor contract (OTel calls this from GC). + Our own ObservationTracerProvider.shutdown() calls async_shutdown() + for proper async waiting. 
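+
+        A hedged sketch of the two call paths (names as in this module):
+
+            processor.shutdown(timeout_millis=5000)    # sync path, e.g. OTel GC
+            await processor.async_shutdown()           # async path, via provider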
+ """ + try: + self._loop.call_soon_threadsafe(self._queue.put_nowait, _SENTINEL) + except RuntimeError: + return + try: + self._drain_future.result(timeout=timeout_millis / 1000) + except Exception: + pass + + async def async_shutdown(self) -> None: + """Async variant used by ObservationTracerProvider.shutdown().""" + self._queue.put_nowait(_SENTINEL) + try: + await asyncio.wait_for(asyncio.wrap_future(self._drain_future), timeout=10) + except Exception: + logger.opt(exception=True).warning("PersistenceProcessor drain loop did not exit cleanly") + + # ---- Internal: drain loop & flush ---- + + async def _drain_loop(self) -> None: + """Continuously drain the queue and flush batches to PG.""" + buffer: list[Observation] = [] + while True: + try: + item = await asyncio.wait_for(self._queue.get(), timeout=self._max_wait_ms / 1000) + except asyncio.TimeoutError: + if buffer: + await self._flush(buffer) + buffer.clear() + continue + + if item is _SENTINEL: + if buffer: + await self._flush(buffer) + break + + buffer.append(item) + if len(buffer) >= self._max_batch: + await self._flush(buffer) + buffer.clear() + + async def _flush(self, buffer: list[Observation]) -> None: + """Write a batch of Observation rows to PG.""" + if not buffer: + return + session = await self._db_session_factory() + try: + session.add_all(buffer) + await session.commit() + except Exception: + logger.opt(exception=True).warning("PersistenceProcessor flush failed") + await session.rollback() + finally: + await session.close() + + # ---- Helpers ---- + + @staticmethod + def _parse_iso_attr(attrs: Mapping[str, Any], key: str) -> datetime | None: + val = attrs.get(key) + if not val or not isinstance(val, str): + return None + try: + return datetime.fromisoformat(val) + except ValueError: + return None + + @staticmethod + def _safe_int(val: Any) -> int | None: + if val is None: + return None + try: + return int(val) + except (TypeError, ValueError): + return None diff --git a/backend/app/core/observation/otel/processor_base.py b/backend/app/core/observation/otel/processor_base.py new file mode 100644 index 000000000..f1ffc9cc8 --- /dev/null +++ b/backend/app/core/observation/otel/processor_base.py @@ -0,0 +1,71 @@ +"""Base SpanProcessor extension that adds an on_event hook for live streaming.""" + +from __future__ import annotations + +import json +from collections.abc import Mapping +from datetime import datetime, timezone +from typing import Any + +from opentelemetry.sdk.trace import SpanProcessor + + +class LiveSpanProcessor(SpanProcessor): + """SpanProcessor variant that also receives live (mid-span) events. + + OTel's stock SpanProcessor only fires on_start/on_end. LiveSpanProcessor + adds on_event so streaming token / intermediate-update events can be + pushed out the moment they happen — bypassing on_end batching. + """ + + def on_event(self, span: Any, event_name: str, attributes: dict) -> None: + """Called by ObservationSpan when a live event is emitted. 
Default: no-op.""" + return None + + +def parse_json_attr(val: Any) -> Any: + if val is None: + return None + if isinstance(val, str): + try: + return json.loads(val) + except (json.JSONDecodeError, ValueError): + return val + return val + + +def ns_to_datetime(ns: int | None) -> datetime | None: + if ns is None: + return None + return datetime.fromtimestamp(ns / 1e9, tz=timezone.utc) + + +def ns_to_iso(ns: int | None) -> str | None: + dt = ns_to_datetime(ns) + return dt.isoformat() if dt else None + + +def build_usage(attrs: Mapping[str, Any]) -> dict | None: + inp = attrs.get("llm.usage.input") + out = attrs.get("llm.usage.output") + total = attrs.get("llm.usage.total") + if inp is None and out is None and total is None: + return None + return { + "input": int(inp) if inp is not None else 0, + "output": int(out) if out is not None else 0, + "total": int(total) if total is not None else 0, + } + + +def build_cost(attrs: Mapping[str, Any]) -> dict | None: + inp = attrs.get("llm.cost.input") + out = attrs.get("llm.cost.output") + total = attrs.get("llm.cost.total") + if inp is None and out is None and total is None: + return None + return { + "input": float(inp) if inp is not None else 0.0, + "output": float(out) if out is not None else 0.0, + "total": float(total) if total is not None else 0.0, + } diff --git a/backend/app/core/observation/otel/provider.py b/backend/app/core/observation/otel/provider.py new file mode 100644 index 000000000..fb0d64797 --- /dev/null +++ b/backend/app/core/observation/otel/provider.py @@ -0,0 +1,62 @@ +"""ObservationTracerProvider — per-execution OTel TracerProvider lifecycle.""" + +from __future__ import annotations + +import asyncio +import uuid +from typing import Any, Callable, Coroutine + +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.trace import Tracer + +from app.core.observation.otel.broadcast_processor import BroadcastProcessor +from app.core.observation.otel.persistence_processor import PersistenceProcessor +from app.core.observation.otel.processor_base import LiveSpanProcessor +from app.core.observation.otel.span_wrapper import ObservationSpan + + +class ObservationTracerProvider: + def __init__( + self, + execution_id: uuid.UUID, + trace_id: uuid.UUID, + workspace_id: uuid.UUID, + db_session_factory: Callable[..., Coroutine[Any, Any, Any]], + broadcast_fn: Callable[..., Coroutine[Any, Any, None]] | None, + event_loop: asyncio.AbstractEventLoop, + ) -> None: + self._provider = TracerProvider( + resource=Resource.create( + { + "service.name": "joysafeter", + "execution.id": str(execution_id), + "trace.id": str(trace_id), + "workspace.id": str(workspace_id), + } + ) + ) + self._persistence = PersistenceProcessor(execution_id, trace_id, workspace_id, db_session_factory, event_loop) + self._trace_id = trace_id + self._broadcast = BroadcastProcessor(execution_id, trace_id, broadcast_fn, event_loop) + self._provider.add_span_processor(self._persistence) + self._provider.add_span_processor(self._broadcast) + self._tracer = self._provider.get_tracer("joysafeter.observation") + self._live_processors: list[LiveSpanProcessor] = [self._broadcast] + + def get_tracer(self) -> Tracer: + return self._tracer + + def dispatch_live_event(self, span: ObservationSpan, event_name: str, attributes: dict) -> None: + for proc in self._live_processors: + proc.on_event(span, event_name, attributes) + + def get_persistence_aggregates(self) -> dict: + return self._persistence.get_aggregates() + + def 
broadcast_trace_complete(self, status: str, aggregates: dict) -> None: + self._broadcast.emit_trace_complete(status, str(self._trace_id), aggregates) + + async def shutdown(self) -> None: + await self._persistence.async_shutdown() + self._broadcast.shutdown() diff --git a/backend/app/core/observation/otel/span_wrapper.py b/backend/app/core/observation/otel/span_wrapper.py new file mode 100644 index 000000000..9ba3d0fd6 --- /dev/null +++ b/backend/app/core/observation/otel/span_wrapper.py @@ -0,0 +1,119 @@ +"""ObservationSpan — typed wrapper over an OTel Span with observation-schema setters.""" + +from __future__ import annotations + +import json +import uuid +from datetime import datetime +from typing import TYPE_CHECKING, Any + +from opentelemetry.trace import Span + +from app.core.observation.types import ObservationLevel, ObservationType + +if TYPE_CHECKING: + from app.core.observation.otel.provider import ObservationTracerProvider + + +def _safe_json(value: Any) -> str: + return json.dumps(value, default=str) + + +class ObservationSpan: + __slots__ = ("_span", "observation_id", "_provider") + + def __init__( + self, + otel_span: Span, + observation_id: uuid.UUID, + provider: "ObservationTracerProvider", + ) -> None: + self._span = otel_span + self.observation_id = observation_id + self._provider = provider + + # --- typed attribute setters --- + + def set_input(self, value: Any) -> None: + self._span.set_attribute("observation.input", _safe_json(value)) + + def set_output(self, value: Any) -> None: + self._span.set_attribute("observation.output", _safe_json(value)) + + def set_metadata(self, value: dict) -> None: + self._span.set_attribute("observation.metadata", _safe_json(value)) + + def set_model(self, name: str) -> None: + self._span.set_attribute("llm.model", name) + + def set_model_parameters(self, params: dict) -> None: + self._span.set_attribute("llm.parameters", _safe_json(params)) + + def set_usage(self, usage: dict) -> None: + for key in ("input", "output", "total"): + if key in usage: + self._span.set_attribute(f"llm.usage.{key}", usage[key]) + + def set_cost(self, cost: dict) -> None: + for key in ("input", "output", "total"): + if key in cost: + self._span.set_attribute(f"llm.cost.{key}", cost[key]) + + def set_level(self, level: ObservationLevel) -> None: + self._span.set_attribute("observation.level", level.value) + + def set_status_message(self, msg: str) -> None: + self._span.set_attribute("observation.status_message", msg) + + def set_observation_type(self, t: ObservationType) -> None: + self._span.set_attribute("observation.type", t.value) + + def set_prompt(self, name: str, version: str | None) -> None: + self._span.set_attribute("llm.prompt.name", name) + if version is not None: + self._span.set_attribute("llm.prompt.version", version) + + def set_tool_calls(self, calls: list) -> None: + self._span.set_attribute("tool.calls", _safe_json(calls)) + + def set_tool_definitions(self, defs: list) -> None: + self._span.set_attribute("tool.definitions", _safe_json(defs)) + + def set_completion_start_time(self, ts: datetime) -> None: + self._span.set_attribute("llm.completion_start_time", ts.isoformat()) + + def get_parent_span_id(self) -> int | None: + parent = getattr(self._span, "parent", None) + return parent.span_id if parent else None + + # --- streaming events --- + + def flush_streaming_text(self, text: str) -> None: + self._provider.dispatch_live_event(self, "streaming_text", {"text": text}) + + def add_intermediate_update(self, payload: dict) -> None: + 
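+        # Persist the update as a span event, then fan it out to live
+        # processors immediately instead of waiting for span end.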
self._span.add_event( + "stream.intermediate_update", + { + "payload_json": json.dumps(payload, default=str), + }, + ) + self._provider.dispatch_live_event(self, "span_update", payload) + + def add_event(self, name: str, attributes: dict[str, str] | None = None) -> None: + self._span.add_event(name, attributes or {}) + + # --- lifecycle --- + + def record_error(self, exc: Exception, level: ObservationLevel) -> None: + self._span.set_attribute("observation.level", level.value) + self._span.set_attribute("observation.status_message", str(exc)) + + def get_context(self) -> Any: + """Return an OTel Context with this span set as current.""" + from opentelemetry import trace as _trace + + return _trace.set_span_in_context(self._span) + + def end(self) -> None: + self._span.end() diff --git a/backend/app/core/observation/types.py b/backend/app/core/observation/types.py new file mode 100644 index 000000000..c5b811f2f --- /dev/null +++ b/backend/app/core/observation/types.py @@ -0,0 +1,30 @@ +""" +Canonical observation types — the single source of truth for Langfuse-aligned tracing. + +Values MUST match Langfuse SDK enums exactly (uppercase). Used by ObservationCollector +to emit observation events to the trace tree. +""" + +from __future__ import annotations + +from enum import StrEnum + + +class ObservationType(StrEnum): + SPAN = "SPAN" + EVENT = "EVENT" + GENERATION = "GENERATION" + AGENT = "AGENT" + TOOL = "TOOL" + CHAIN = "CHAIN" + RETRIEVER = "RETRIEVER" + EMBEDDING = "EMBEDDING" + EVALUATOR = "EVALUATOR" + GUARDRAIL = "GUARDRAIL" + + +class ObservationLevel(StrEnum): + DEBUG = "DEBUG" + DEFAULT = "DEFAULT" + WARNING = "WARNING" + ERROR = "ERROR" diff --git a/backend/app/core/ports/__init__.py b/backend/app/core/ports/__init__.py new file mode 100644 index 000000000..b5220352f --- /dev/null +++ b/backend/app/core/ports/__init__.py @@ -0,0 +1,14 @@ +""" +Ports — Protocol interfaces defining the boundary between core/ and services/. + +core/ modules depend on these Protocols (dependency inversion). +services/ modules provide concrete implementations. +""" + +from app.core.ports.execution import EventContext, ExecutionEventPort, ExecutionReaderPort + +__all__ = [ + "EventContext", + "ExecutionEventPort", + "ExecutionReaderPort", +] diff --git a/backend/app/core/ports/execution.py b/backend/app/core/ports/execution.py new file mode 100644 index 000000000..eb8282ac6 --- /dev/null +++ b/backend/app/core/ports/execution.py @@ -0,0 +1,94 @@ +""" +Execution ports — Protocol interfaces for core/ ↔ services/ decoupling. + +core/ modules depend on these Protocols (not concrete service classes). +services/ modules provide the implementations. +""" + +from __future__ import annotations + +import uuid +from dataclasses import dataclass +from typing import Any, Mapping, Optional, Protocol, runtime_checkable + +from app.core.events.event_types import ExecutionEventType + + +@dataclass +class EventContext: + """Run-level metadata injected by the caller (e.g. ExecutionRunner). + + Allows event publishing to construct complete envelopes + without querying the DB for run metadata on every event. + """ + + run_id: uuid.UUID + workspace_id: uuid.UUID + trigger_medium: Optional[str] = None + run_purpose: Optional[str] = None + thread_id: Optional[uuid.UUID] = None + task_id: Optional[uuid.UUID] = None + + +@runtime_checkable +class ExecutionEventPort(Protocol): + """Port for publishing execution events through the event bus. 
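+
+    A hedged sketch of intended use from core/ (identifiers are illustrative):
+
+        events.set_event_context(EventContext(run_id=run_id, workspace_id=ws_id))
+        await events.mark_status(execution_id=execution_id, status="running")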
+ + Implemented by: services/execution_event_adapter.py + Used by: core/agent/cli_backends/execution_runner.py + """ + + def set_event_context(self, ctx: EventContext) -> None: ... + + async def mark_status( + self, + *, + execution_id: uuid.UUID, + status: str, + container_id: Optional[str] = None, + session_id: Optional[str] = None, + error: Mapping[str, Any] | None = None, + result_summary: Optional[dict[str, Any]] = None, + ) -> Any: ... + + async def append_event( + self, + *, + execution_id: uuid.UUID, + event_type: ExecutionEventType, + payload: dict[str, Any], + ) -> Any: ... + + async def batch_append_events( + self, + *, + execution_id: uuid.UUID, + events: list[dict[str, Any]], + ) -> list: ... + + async def complete_execution( + self, + *, + execution_id: uuid.UUID, + terminal_status: str, + result_summary: Optional[dict] = None, + error: Mapping[str, Any] | None = None, + session_id: Optional[str] = None, + ) -> None: ... + + +@runtime_checkable +class ExecutionReaderPort(Protocol): + """Port for reading execution data without direct ORM queries in core/. + + Implemented by: services/execution_reader_adapter.py (Phase 2) + Used by: core/agent/cli_backends/execution_runner.py + """ + + async def get_execution(self, execution_id: uuid.UUID) -> Any: ... + + async def get_run_for_execution(self, execution_id: uuid.UUID) -> Any: ... + + async def get_release_for_run(self, run_id: uuid.UUID) -> Any: ... + + async def get_task_auto_approve(self, task_id: uuid.UUID) -> bool: ... diff --git a/backend/app/core/rate_limit.py b/backend/app/core/rate_limit.py index 58828f704..cc39e98df 100644 --- a/backend/app/core/rate_limit.py +++ b/backend/app/core/rate_limit.py @@ -9,7 +9,7 @@ from fastapi import Request -from app.common.exceptions import AppException +from app.common.app_errors import RateLimitExceededError class RateLimiter: @@ -135,13 +135,14 @@ async def wrapper(*args, **kwargs): # check rate limit if not _rate_limiter.is_allowed(rate_limit_key, max_requests, window_seconds): remaining = _rate_limiter.get_remaining(rate_limit_key, max_requests, window_seconds) - raise AppException( - status_code=429, + raise RateLimitExceededError( message=f"Rate limit exceeded. 
Try again in {window_seconds} seconds.", - headers={ - "X-RateLimit-Limit": str(max_requests), - "X-RateLimit-Remaining": str(remaining), - "X-RateLimit-Reset": str(int(time.time() + window_seconds)), + data={ + "headers": { + "X-RateLimit-Limit": str(max_requests), + "X-RateLimit-Remaining": str(remaining), + "X-RateLimit-Reset": str(int(time.time() + window_seconds)), + } }, ) diff --git a/backend/app/core/redis.py b/backend/app/core/redis.py index 819f4324f..4dc7d4328 100644 --- a/backend/app/core/redis.py +++ b/backend/app/core/redis.py @@ -4,7 +4,7 @@ import json from contextlib import asynccontextmanager -from typing import Any, Awaitable, Dict, Optional, cast +from typing import Any, Awaitable, Optional, cast import redis.asyncio as redis_async from loguru import logger @@ -173,41 +173,6 @@ async def lock(cls, name: str, timeout: int = 60, blocking_timeout: int = 60): except Exception as e: logger.warning(f"Error releasing lock {name}: {e}") - # ==================== Generic Run Methods ==================== - - @classmethod - async def publish_run_event(cls, run_id: str, event: Dict[str, Any]) -> bool: - """Publish a durable run event to subscribers.""" - if not cls._client: - return False - channel = f"runs:{run_id}:events" - event_str = json.dumps(event, ensure_ascii=False) - await cls._client.publish(channel, event_str) - return True - - @classmethod - async def set_run_snapshot(cls, run_id: str, snapshot: Dict[str, Any], ttl: int = 86400) -> bool: - """Cache the latest run snapshot.""" - if not cls._client: - return False - key = f"runs:{run_id}:snapshot" - await cls._client.set(key, json.dumps(snapshot, ensure_ascii=False), ex=ttl) - return True - - @classmethod - async def get_run_snapshot(cls, run_id: str) -> Optional[Dict[str, Any]]: - """Get the latest cached run snapshot.""" - if not cls._client: - return None - key = f"runs:{run_id}:snapshot" - data = await cls._client.get(key) - if data is None: - return None - try: - return cast(Dict[str, Any], json.loads(data)) - except (TypeError, ValueError): - return None - # Helper function async def get_redis() -> Optional[redis_async.Redis]: diff --git a/backend/app/core/scheduler.py b/backend/app/core/scheduler.py new file mode 100644 index 000000000..49185edf0 --- /dev/null +++ b/backend/app/core/scheduler.py @@ -0,0 +1,111 @@ +""" +Background scheduler loops for task auto-dispatch and stale execution reaping. + +Registered in app lifespan (main.py). Each function is an infinite async loop +following the same pattern as _container_reaper. 
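+
+A hedged sketch of the intended wiring (the exact main.py code may differ):
+
+    @asynccontextmanager
+    async def lifespan(app):
+        await recover_stale_on_startup()
+        loops = [asyncio.create_task(task_dispatcher_loop()),
+                 asyncio.create_task(execution_reaper_loop())]
+        yield
+        for loop_task in loops:
+            loop_task.cancel()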
+""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta + +from loguru import logger + +from app.core.database import AsyncSessionLocal + +_DISPATCH_INTERVAL = 30 +_REAPER_INTERVAL = 30 + +_STALE_THRESHOLDS: list[tuple[tuple[str, ...], timedelta]] = [ + ( + ("pending", "dispatched"), + timedelta(minutes=5), + ), + ( + ("running",), + timedelta(minutes=10), + ), + ( + ("approval_wait",), + timedelta(minutes=60), + ), +] + + +async def task_dispatcher_loop() -> None: + """Every 30s, find BACKLOG tasks with agent assignees and dispatch them.""" + while True: + await asyncio.sleep(_DISPATCH_INTERVAL) + try: + async with AsyncSessionLocal() as db: + from sqlalchemy import select + + from app.models.task import Task + + # Find backlog tasks with assigned agents + tasks = ( + ( + await db.execute( + select(Task).where( + Task.status == "backlog", + Task.agent_id.isnot(None), + ) + ) + ) + .scalars() + .all() + ) + + count = 0 + for task in tasks: + try: + from app.services.dispatch_service import DispatchService + + dispatch = DispatchService(db) + await dispatch.dispatch_task(task.id, task.creator_id) + count += 1 + except Exception as task_exc: + logger.warning(f"Auto-dispatch failed for task {task.id}: {task_exc}") + + if count: + logger.info(f"Scheduler: auto-dispatched {count} tasks") + except Exception as exc: + logger.warning(f"Task dispatcher error: {exc}") + + +async def execution_reaper_loop() -> None: + """Every 30s, find stale executions and mark them failed.""" + while True: + await asyncio.sleep(_REAPER_INTERVAL) + try: + reaped = await _reap_stale_executions() + if reaped: + logger.info(f"Scheduler: reaped {reaped} stale executions") + except Exception as exc: + logger.warning(f"Execution reaper error: {exc}") + + +async def recover_stale_on_startup() -> None: + """One-shot: catch executions that went stale during downtime.""" + try: + reaped = await _reap_stale_executions() + if reaped: + logger.info(f"Startup recovery: reaped {reaped} stale executions") + else: + logger.info("Startup recovery: no stale executions found") + except Exception as exc: + logger.warning(f"Startup stale execution recovery failed: {exc}") + + +async def _reap_stale_executions() -> int: + """Shared logic for reaper loop and startup recovery. + + Delegates all business logic to ExecutionService.reap_stale_executions + so the scheduler only decides *when* to run and *what thresholds* to use. 
+ """ + async with AsyncSessionLocal() as db: + from app.services.execution_service import ExecutionService + + svc = ExecutionService(db) + return await svc.reap_stale_executions(_STALE_THRESHOLDS) diff --git a/backend/app/core/settings.py b/backend/app/core/settings.py index f1180e122..cdcff6c91 100644 --- a/backend/app/core/settings.py +++ b/backend/app/core/settings.py @@ -106,6 +106,20 @@ def _force_code_version(cls, v: str) -> str: # noqa: ARG003 validation_alias=AliasChoices("DATABASE_MAX_OVERFLOW", "DB_MAX_OVERFLOW"), description="Database connection pool max overflow", ) + checkpointer_pool_min_size: int = Field( + default=1, + validation_alias=AliasChoices( + "DB_POOL_MIN_SIZE", + ), + description="Min connections for the LangGraph checkpointer psycopg pool", + ) + checkpointer_pool_max_size: int = Field( + default=10, + validation_alias=AliasChoices( + "DB_POOL_MAX_SIZE", + ), + description="Max connections for the LangGraph checkpointer psycopg pool", + ) @property def database_url(self) -> str: @@ -347,6 +361,26 @@ def parse_cors_origins(cls, v: Union[str, List[str]]) -> List[str]: default=False, description="Enable Langfuse tracing (requires langfuse_public_key and langfuse_secret_key)" ) + # Artifact Storage + agent_artifacts_root: Optional[str] = Field( + default=None, + description="Root directory for agent artifacts (default: ~/.agent-platform/agent-artifacts)", + ) + deepagents_artifacts_dir: Optional[str] = Field( + default=None, + description="Root directory for DeepAgents artifacts", + ) + + # OpenClaw + openclaw_image: str = Field( + default="jdopensource/joysafeter-openclaw:latest", + description="Docker image for OpenClaw instances", + ) + openclaw_network: str = Field( + default="joysafeter-network", + description="Docker network for OpenClaw instances", + ) + # UV Package Manager Configuration uv_index_url: str = Field( default="https://pypi.org/simple", @@ -368,11 +402,6 @@ def parse_cors_origins(cls, v: Union[str, List[str]]) -> List[str]: description="Workspace root directory for storing session files and workspace data", ) - @property - def WORKSPACE_ROOT(self) -> str: - """Alias for workspace_root for backward compatibility""" - return self.workspace_root - # OAuth Configuration oauth_config_path: Optional[str] = Field( default=None, diff --git a/backend/app/core/skill/exceptions.py b/backend/app/core/skill/exceptions.py index 30f35d726..77ee26c44 100644 --- a/backend/app/core/skill/exceptions.py +++ b/backend/app/core/skill/exceptions.py @@ -1,50 +1,70 @@ -"""Custom exceptions for skill loading operations. +"""Custom exceptions for skill operations. -This module defines exception classes for skill loading operations, -providing better error classification and handling. +All skill exceptions inherit from AppError subclasses, integrating with +the unified error system while remaining catchable by specific type. """ - -class SkillLoadError(Exception): - """Base exception for skill loading operations. - - All skill-related exceptions inherit from this class, - allowing callers to catch all skill loading errors with a single except clause. - """ - - pass - - -class SkillNotFoundError(SkillLoadError): - """Raised when a skill is not found or access is denied. 
- - This exception is raised when: - - The skill ID does not exist in the database - - The user does not have permission to access the skill - - The skill service returns None for a skill query - """ - - pass - - -class SkillPermissionDeniedError(SkillLoadError): - """Raised when user lacks permission to access a skill. - - This exception is raised when: - - User tries to access a private skill owned by another user - - Permission check fails during skill retrieval - """ - - pass - - -class SkillFileWriteError(SkillLoadError): - """Raised when writing skill files to sandbox fails. - - This exception is raised when: - - File write operation fails - - Backend write() method returns an error - - File system errors occur during write - """ - - pass +from __future__ import annotations + +from typing import Any, Mapping + +from app.common.app_errors import ( + AccessDeniedError, + DomainError, + InternalServiceError, + NotFoundError, +) + + +class SkillLoadError(DomainError): + _default_source: str = "runtime" + + def __init__( + self, + message: str = "Skill load failed", + *, + code: str = "SKILL_LOAD_FAILED", + data: Mapping[str, Any] | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__(code=code, message=message, data=data, **kw) + + +class SkillNotFoundError(NotFoundError): + def __init__( + self, + message: str = "Skill not found", + *, + code: str = "SKILL_NOT_FOUND", + data: Mapping[str, Any] | None = None, + **kw: Any, + ): + super().__init__(code=code, message=message, data=data, **kw) + + +class SkillPermissionDeniedError(AccessDeniedError): + def __init__( + self, + message: str = "Skill access denied", + *, + code: str = "SKILL_ACCESS_DENIED", + data: Mapping[str, Any] | None = None, + **kw: Any, + ): + super().__init__(code=code, message=message, data=data, **kw) + + +class SkillFileWriteError(InternalServiceError): + _default_source: str = "runtime" + + def __init__( + self, + message: str = "Skill file write failed", + *, + code: str = "SKILL_FILE_WRITE_FAILED", + data: Mapping[str, Any] | None = None, + **kw: Any, + ): + kw.setdefault("source", self._default_source) + super().__init__(code=code, message=message, data=data, **kw) diff --git a/backend/app/core/state_machines/__init__.py b/backend/app/core/state_machines/__init__.py new file mode 100644 index 000000000..f5d86ff28 --- /dev/null +++ b/backend/app/core/state_machines/__init__.py @@ -0,0 +1,44 @@ +"""Centralized state machine definitions and transition functions.""" + +from app.core.state_machines.definitions import ( + AGENT_STATES, + AGENT_TERMINAL, + EXECUTION_STATES, + EXECUTION_TERMINAL, + RELEASE_STATES, + RELEASE_TERMINAL, + RUN_STATES, + RUN_TERMINAL, + RUN_TO_TASK_SYNC, + TASK_STATES, + TASK_TERMINAL, + VERSION_STATES, + VERSION_TERMINAL, +) +from app.core.state_machines.engine import InvalidTransition, StateMachine + +# Pre-built state machine instances +AGENT_SM = StateMachine("Agent", AGENT_STATES, AGENT_TERMINAL) +VERSION_SM = StateMachine("AgentVersion", VERSION_STATES, VERSION_TERMINAL) +RELEASE_SM = StateMachine("AgentRelease", RELEASE_STATES, RELEASE_TERMINAL) +RUN_SM = StateMachine("AgentRun", RUN_STATES, RUN_TERMINAL) +EXECUTION_SM = StateMachine("Execution", EXECUTION_STATES, EXECUTION_TERMINAL) +TASK_SM = StateMachine("Task", TASK_STATES, TASK_TERMINAL) + +__all__ = [ + "AGENT_SM", + "VERSION_SM", + "RELEASE_SM", + "RUN_SM", + "EXECUTION_SM", + "TASK_SM", + "InvalidTransition", + "StateMachine", + "RUN_TO_TASK_SYNC", + "AGENT_STATES", + "VERSION_STATES", + "RELEASE_STATES", + "RUN_STATES", +
"EXECUTION_STATES", + "TASK_STATES", +] diff --git a/backend/app/core/state_machines/definitions.py b/backend/app/core/state_machines/definitions.py new file mode 100644 index 000000000..636e06654 --- /dev/null +++ b/backend/app/core/state_machines/definitions.py @@ -0,0 +1,92 @@ +""" +Centralized state transition definitions for all domain entities. + +This is the single source of truth for: +- What statuses each entity can have +- What transitions are allowed between statuses +- Which statuses are terminal (no outbound transitions) +- How Run completion maps to Task status +""" + +from __future__ import annotations + +# --------------------------------------------------------------------------- +# Agent +# --------------------------------------------------------------------------- +AGENT_STATES: dict[str, set[str]] = { + "draft": {"active", "archived"}, + "active": {"draft", "archived"}, + "archived": {"draft"}, +} +AGENT_TERMINAL: set[str] = set() # archived can be reverted + +# --------------------------------------------------------------------------- +# AgentVersion +# --------------------------------------------------------------------------- +VERSION_STATES: dict[str, set[str]] = { + "draft": {"frozen"}, + "frozen": {"draft"}, # unfreeze +} +VERSION_TERMINAL: set[str] = set() + +# --------------------------------------------------------------------------- +# AgentRelease +# --------------------------------------------------------------------------- +RELEASE_STATES: dict[str, set[str]] = { + "ready": {"active", "retired"}, + "active": {"superseded"}, + "superseded": {"active", "retired"}, + "failed": {"retired"}, + "retired": set(), +} +RELEASE_TERMINAL: set[str] = {"retired"} + +# --------------------------------------------------------------------------- +# AgentRun +# --------------------------------------------------------------------------- +RUN_STATES: dict[str, set[str]] = { + "pending": {"running", "cancelled"}, + "running": {"succeeded", "failed", "cancelled"}, + "succeeded": set(), + "failed": set(), + "cancelled": set(), +} +RUN_TERMINAL: set[str] = {"succeeded", "failed", "cancelled"} + +# --------------------------------------------------------------------------- +# Execution +# --------------------------------------------------------------------------- +EXECUTION_STATES: dict[str, set[str]] = { + "pending": {"dispatched", "running", "cancelled", "failed"}, + "dispatched": {"running", "failed", "cancelled"}, + "running": {"approval_wait", "succeeded", "failed", "cancelled"}, + "approval_wait": {"running", "cancelled"}, + "succeeded": set(), + "failed": set(), + "cancelled": set(), +} +EXECUTION_TERMINAL: set[str] = {"succeeded", "failed", "cancelled"} + +# --------------------------------------------------------------------------- +# Task +# --------------------------------------------------------------------------- +TASK_STATES: dict[str, set[str]] = { + "backlog": {"todo", "in_progress", "cancelled"}, + "todo": {"in_progress", "backlog", "cancelled"}, + "in_progress": {"done", "in_review", "cancelled", "backlog"}, + "in_review": {"in_progress", "done", "backlog", "cancelled"}, + "done": {"backlog"}, + "cancelled": {"backlog"}, +} +TASK_TERMINAL: set[str] = set() # done/cancelled can be reopened via backlog + +# --------------------------------------------------------------------------- +# Cross-entity sync: Run terminal status -> Task target status +# --------------------------------------------------------------------------- +RUN_TO_TASK_SYNC: dict[str, str] = { + 
"pending": "in_progress", + "running": "in_progress", + "succeeded": "done", + "failed": "in_review", + "cancelled": "backlog", +} diff --git a/backend/app/core/state_machines/engine.py b/backend/app/core/state_machines/engine.py new file mode 100644 index 000000000..18f79ce1e --- /dev/null +++ b/backend/app/core/state_machines/engine.py @@ -0,0 +1,63 @@ +"""Generic state machine engine.""" + +from __future__ import annotations + +from app.common.app_errors import DomainError + + +class InvalidTransition(DomainError): + """Raised when a status transition violates the state machine rules.""" + + def __init__(self, entity: str, from_status: str, to_status: str): + self.entity = entity + self.from_status = from_status + self.to_status = to_status + super().__init__( + code="STATE_TRANSITION_INVALID", + message=f"{entity}: cannot transition from '{from_status}' to '{to_status}'", + source="engine", + data={"entity": entity, "from_status": from_status, "to_status": to_status}, + ) + + +class StateMachine: + """ + Validates status transitions against a declared transition table. + + Usage: + sm = StateMachine("AgentRun", RUN_STATES, RUN_TERMINAL) + sm.validate("pending", "running") # OK + sm.validate("succeeded", "running") # raises InvalidTransition + """ + + def __init__( + self, + name: str, + transitions: dict[str, set[str]], + terminal: set[str], + ): + self.name = name + self._transitions = transitions + self._terminal = terminal + + def validate(self, from_status: str, to_status: str) -> None: + allowed = self._transitions.get(from_status) + if allowed is None: + raise InvalidTransition(self.name, from_status, to_status) + if to_status not in allowed: + raise InvalidTransition(self.name, from_status, to_status) + + def is_terminal(self, status: str) -> bool: + return status in self._terminal + + @property + def all_statuses(self) -> set[str]: + return set(self._transitions.keys()) + + @property + def initial_statuses(self) -> set[str]: + """Statuses that are not a target of any transition (entry points).""" + all_targets: set[str] = set() + for targets in self._transitions.values(): + all_targets |= targets + return self.all_statuses - all_targets diff --git a/backend/app/core/state_machines/transitions.py b/backend/app/core/state_machines/transitions.py new file mode 100644 index 000000000..74d01262d --- /dev/null +++ b/backend/app/core/state_machines/transitions.py @@ -0,0 +1,97 @@ +""" +Centralized status transition functions. + +These are the ONLY functions that should modify .status on domain entities. +All status changes in the codebase should route through here. 
+""" + +from __future__ import annotations + +import uuid +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.core.state_machines import ( + EXECUTION_SM, + RUN_SM, + RUN_TO_TASK_SYNC, + TASK_SM, +) +from app.core.state_machines.engine import InvalidTransition +from app.utils.datetime import utc_now + + +async def transition_run( + run, # AgentRun — avoid circular import + to_status: str, + db: AsyncSession, + result_summary: str | None = None, +) -> None: + """Transition an AgentRun to a new status with validation.""" + RUN_SM.validate(run.status, to_status) + run.status = to_status + if to_status == "running": + run.ended_at = None + if not run.started_at: + run.started_at = utc_now() + if RUN_SM.is_terminal(to_status): + run.ended_at = run.ended_at or utc_now() + if result_summary is not None: + run.result_summary = result_summary + await db.flush() + + +async def transition_execution( + execution, # Execution + to_status: str, + db: AsyncSession, +) -> None: + """Transition an Execution to a new status with validation.""" + EXECUTION_SM.validate(execution.status, to_status) + execution.status = to_status + if to_status == "running" and not execution.started_at: + execution.started_at = utc_now() + if EXECUTION_SM.is_terminal(to_status): + execution.ended_at = execution.ended_at or utc_now() + await db.flush() + + +async def transition_task( + task, # Task + to_status: str, + db: AsyncSession, + *, + latest_run_id: Optional[uuid.UUID] = None, +) -> None: + """Transition a Task to a new status with validation.""" + TASK_SM.validate(task.status, to_status) + task.status = to_status + if latest_run_id is not None: + task.latest_run_id = latest_run_id + await db.flush() + + +async def sync_task_from_run( + run, # AgentRun + db: AsyncSession, +) -> None: + """Auto-sync Task status based on Run terminal status.""" + if not run.task_id: + return + from app.models.task import Task + + task = (await db.execute(select(Task).where(Task.id == run.task_id))).scalar_one_or_none() + if not task: + return + target = RUN_TO_TASK_SYNC.get(run.status) + if target and task.status != target: + task.latest_run_id = run.id + try: + await transition_task(task, target, db) + except InvalidTransition: + # Edge case: task was manually moved to a state where auto-sync + # is not valid (e.g., user set it to "done" before run finished). + # Don't override the manual decision. + return diff --git a/backend/app/core/tools/mcp_tool_utils.py b/backend/app/core/tools/mcp_tool_utils.py index f4ebde670..50f75a774 100644 --- a/backend/app/core/tools/mcp_tool_utils.py +++ b/backend/app/core/tools/mcp_tool_utils.py @@ -13,16 +13,16 @@ from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError, ServiceUnavailableError from app.core.tools.tool import EnhancedTool from app.core.tools.tool_registry import MCP_TOOL_KEY_SEPARATOR, get_global_registry from app.models.mcp import McpServer -from app.services.mcp_server_service import McpServerService def _assert_not_uuid(server_identifier: str, context: str = "") -> None: """Assert that server_identifier is not a UUID. - Raise AssertionError if server_identifier is a valid UUID, + Raise InvalidRequestError if server_identifier is a valid UUID, ensuring we always use server names rather than UUIDs. 
Args: @@ -30,18 +30,17 @@ def _assert_not_uuid(server_identifier: str, context: str = "") -> None: context: Context info for the error message. Raises: - AssertionError: If server_identifier is a UUID. + InvalidRequestError: If server_identifier is a UUID. """ if not server_identifier: return try: uuid.UUID(server_identifier) - # valid UUID — raise assertion error - context_msg = f" in {context}" if context else "" - raise AssertionError( - f"Server identifier must be a server name, not UUID{context_msg}: {server_identifier}. " - f"Please use the server name (e.g., 'my_server') instead of UUID." + raise InvalidRequestError( + "Server identifier must use the MCP server name instead of UUID", + code="MCP_SERVER_IDENTIFIER_INVALID", + data={"server_identifier": server_identifier, "context": context or None}, ) except (ValueError, AttributeError, TypeError): # not a UUID — passes the check @@ -93,7 +92,7 @@ async def resolve_mcp_server_instance(server_name: str, user_id: str, db: AsyncS McpServer instance, or None if not found or deleted. Raises: - AssertionError: If server_name is a UUID. + InvalidRequestError: If server_name is a UUID. """ if not server_name or not user_id: logger.warning( @@ -104,18 +103,28 @@ async def resolve_mcp_server_instance(server_name: str, user_id: str, db: AsyncS _assert_not_uuid(server_name, f"resolve_mcp_server_instance(user_id={user_id})") try: + from app.services.mcp_server_service import McpServerService + service = McpServerService(db) server = await service.repo.get_by_name(user_id, server_name) if not server: error_msg = f"MCP server not found by name: server_name={server_name}, user_id={user_id}" logger.error(f"[resolve_mcp_server_instance] {error_msg}") - raise RuntimeError(f"MCP server '{server_name}' not found.") + raise NotFoundError( + "MCP server not found", + code="MCP_SERVER_NOT_FOUND", + data={"server_name": server_name, "user_id": user_id}, + ) if server.deleted_at: error_msg = f"MCP server is deleted: server_name={server_name}, user_id={user_id}" logger.error(f"[resolve_mcp_server_instance] {error_msg}") - raise RuntimeError(f"MCP server '{server_name}' has been deleted.") + raise NotFoundError( + "MCP server has been deleted", + code="MCP_SERVER_NOT_FOUND", + data={"server_name": server_name, "user_id": user_id}, + ) logger.debug( f"[resolve_mcp_server_instance] Found server: " @@ -123,11 +132,15 @@ async def resolve_mcp_server_instance(server_name: str, user_id: str, db: AsyncS ) return server - except RuntimeError: + except (NotFoundError, InvalidRequestError, AccessDeniedError, ServiceUnavailableError): raise except Exception as e: logger.error(f"[resolve_mcp_server_instance] Error resolving MCP server instance: {e}", exc_info=True) - raise RuntimeError(f"Error resolving MCP server '{server_name}': {str(e)}") + raise ServiceUnavailableError( + "Failed to resolve MCP server instance", + code="MCP_SERVER_RESOLVE_FAILED", + data={"server_name": server_name, "user_id": user_id, "detail": str(e)}, + ) async def validate_mcp_server_for_tool(server: McpServer, user_id: str) -> bool: @@ -141,22 +154,30 @@ async def validate_mcp_server_for_tool(server: McpServer, user_id: str) -> bool: True if the server is usable. Raises: - RuntimeError: If validation fails. + AccessDeniedError | InvalidRequestError: If validation fails. 
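Reviewer note: the practical gain over the old `RuntimeError`s is that each failure now carries a machine-readable `code` plus a structured `data` payload. A rough sketch of what a caller can do with that, using stand-in classes (the real hierarchy lives in `app.common.app_errors` and may differ in detail):

```python
class AppError(Exception):
    """Stand-in for app.common.app_errors; illustrative only."""

    def __init__(self, message: str, *, code: str, data: dict | None = None):
        super().__init__(message)
        self.code = code
        self.data = data or {}


class NotFoundError(AppError):
    pass


def resolve_server(server_name: str):
    # Mirrors the failure path of resolve_mcp_server_instance above.
    raise NotFoundError(
        "MCP server not found",
        code="MCP_SERVER_NOT_FOUND",
        data={"server_name": server_name, "user_id": "u-123"},
    )


try:
    resolve_server("my_server")
except NotFoundError as exc:
    # Handlers can branch on exc.code and surface exc.data verbatim.
    print(exc.code, exc.data)
```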
""" if not server: - raise RuntimeError("MCP server instance is None.") + raise NotFoundError("MCP server instance not found", code="MCP_SERVER_NOT_FOUND", data=None) # verify user ownership if server.user_id != user_id: error_msg = f"User {user_id} does not own server {server.name}" logger.error(f"[validate_mcp_server_for_tool] {error_msg}") - raise RuntimeError(f"Permission denied: You do not own MCP server '{server.name}'.") + raise AccessDeniedError( + "You do not own this MCP server", + code="MCP_SERVER_ACCESS_DENIED", + data={"server_name": server.name, "user_id": user_id}, + ) # verify server is enabled if not server.enabled: error_msg = f"Server {server.name} is disabled" logger.warning(f"[validate_mcp_server_for_tool] {error_msg}") - raise RuntimeError(f"MCP server '{server.name}' is disabled.") + raise InvalidRequestError( + "MCP server is disabled", + code="MCP_SERVER_DISABLED", + data={"server_name": server.name}, + ) return True @@ -181,14 +202,16 @@ async def get_mcp_tool_with_instance( EnhancedTool instance. Raises: - RuntimeError: If any validation step fails. + AppError: If any validation step fails. """ # 1. resolve MCP server instance server = await resolve_mcp_server_instance(server_name, user_id, db) if not server: - # resolve_mcp_server_instance now raises RuntimeError, so this branch might be unreachable - # but kept for robustness if resolve_mcp_server_instance returns None - raise RuntimeError(f"MCP server '{server_name}' not found.") + raise NotFoundError( + "MCP server not found", + code="MCP_SERVER_NOT_FOUND", + data={"server_name": server_name, "user_id": user_id}, + ) # 2. validate server instance await validate_mcp_server_for_tool(server, user_id) @@ -200,7 +223,11 @@ async def get_mcp_tool_with_instance( if not tool: error_msg = f"Tool not found in registry: server_name={server_name}, tool_name={tool_name}" logger.error(f"[get_mcp_tool_with_instance] {error_msg}") - raise RuntimeError(f"MCP tool '{tool_name}' not found on server '{server_name}'.") + raise NotFoundError( + "MCP tool not found", + code="MCP_TOOL_NOT_FOUND", + data={"server_name": server_name, "tool_name": tool_name}, + ) logger.debug( f"[get_mcp_tool_with_instance] Successfully retrieved tool: server_name={server_name}, tool_name={tool_name}" diff --git a/backend/app/core/tools/sandbox/sandbox_factory.py b/backend/app/core/tools/sandbox/sandbox_factory.py index 616a07330..daf35d793 100644 --- a/backend/app/core/tools/sandbox/sandbox_factory.py +++ b/backend/app/core/tools/sandbox/sandbox_factory.py @@ -12,6 +12,8 @@ from deepagents.backends.protocol import SandboxBackendProtocol from loguru import logger +from app.common.app_errors import InvalidRequestError, ServiceUnavailableError + if TYPE_CHECKING: from app.core.agent.backends.pydantic_adapter import RuntimeConfig @@ -43,8 +45,11 @@ def _run_sandbox_setup(backend: SandboxBackendProtocol, setup_script_path: str) if result.exit_code != 0: logger.info(f"[red]❌ Setup script failed (exit {result.exit_code}):[/red]") logger.info(f"[dim]{result.output}[/dim]") - msg = "Setup failed - aborting" - raise RuntimeError(msg) + raise ServiceUnavailableError( + "Sandbox setup script failed", + code="SANDBOX_SETUP_FAILED", + data={"setup_script_path": setup_script_path, "exit_code": result.exit_code, "output": result.output}, + ) logger.info("[green]✓ Setup complete[/green]") @@ -88,8 +93,11 @@ def create_modal_sandbox( # Poll until running (Modal requires this) for _ in range(90): # 180s timeout (90 * 2s) if sandbox.poll() is not None: # Sandbox 
terminated unexpectedly - msg = "Modal sandbox terminated unexpectedly during startup" - raise RuntimeError(msg) + raise ServiceUnavailableError( + "Modal sandbox terminated unexpectedly during startup", + code="MODAL_SANDBOX_STARTUP_FAILED", + data={"sandbox_id": sandbox_id}, + ) # Check if sandbox is ready by attempting a simple command try: process = sandbox.exec("echo", "ready", timeout=5) @@ -102,8 +110,11 @@ def create_modal_sandbox( else: # Timeout - cleanup and fail sandbox.terminate() - msg = "Modal sandbox failed to start within 180 seconds" - raise RuntimeError(msg) + raise ServiceUnavailableError( + "Modal sandbox failed to start within timeout", + code="MODAL_SANDBOX_STARTUP_TIMEOUT", + data={"sandbox_id": sandbox_id, "timeout_seconds": 180}, + ) backend = ModalBackend(sandbox) logger.info(f"[green]✓ Modal sandbox ready: {backend.id}[/green]") @@ -149,8 +160,11 @@ def create_runloop_sandbox( bearer_token = os.environ.get("RUNLOOP_API_KEY") if not bearer_token: - msg = "RUNLOOP_API_KEY environment variable not set" - raise ValueError(msg) + raise InvalidRequestError( + "RUNLOOP_API_KEY environment variable is required", + code="RUNLOOP_API_KEY_MISSING", + data=None, + ) client = Runloop(bearer_token=bearer_token) @@ -173,8 +187,11 @@ def create_runloop_sandbox( else: # Timeout - cleanup and fail client.devboxes.shutdown(id=devbox.id) - msg = "Devbox failed to start within 180 seconds" - raise RuntimeError(msg) + raise ServiceUnavailableError( + "Runloop devbox failed to start within timeout", + code="RUNLOOP_DEVBOX_STARTUP_TIMEOUT", + data={"sandbox_id": sandbox_id, "timeout_seconds": 180}, + ) logger.info(f"[green]✓ Runloop devbox ready: {sandbox_id}[/green]") @@ -350,8 +367,11 @@ def create_sandbox( SandboxBackend instance """ if provider not in _SANDBOX_PROVIDERS: - msg = f"Unknown sandbox provider: {provider}. Available providers: {', '.join(get_available_sandbox_types())}" - raise ValueError(msg) + raise InvalidRequestError( + "Unknown sandbox provider", + code="SANDBOX_PROVIDER_UNKNOWN", + data={"provider": provider, "available_providers": get_available_sandbox_types()}, + ) sandbox_provider = _SANDBOX_PROVIDERS[provider] @@ -382,8 +402,11 @@ def get_default_working_dir(provider: str) -> str: """ if provider in _PROVIDER_TO_WORKING_DIR: return _PROVIDER_TO_WORKING_DIR[provider] - msg = f"Unknown sandbox provider: {provider}" - raise ValueError(msg) + raise InvalidRequestError( + "Unknown sandbox provider", + code="SANDBOX_PROVIDER_UNKNOWN", + data={"provider": provider, "available_providers": list(_PROVIDER_TO_WORKING_DIR.keys())}, + ) __all__ = [ diff --git a/backend/app/core/trace_context.py b/backend/app/core/trace_context.py index 3f7d0aa58..69b41338b 100644 --- a/backend/app/core/trace_context.py +++ b/backend/app/core/trace_context.py @@ -1,5 +1,8 @@ """Trace context propagation via contextvars. +DEPRECATED: Use ObservationCollector.trace_id instead for new code. +Retained because logging.py and a2a/client.py still import it. + Provides a single trace_id that flows through the entire async call chain: HTTP middleware / WS handler → StreamState → LangGraph → tools → persistence. 
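Reviewer note: the mechanism this now-deprecated module wraps is an ordinary `contextvars.ContextVar` riding the async call chain. A minimal sketch of the pattern (not this module's actual API):

```python
import asyncio
import contextvars
import uuid

# Set once at the edge; every awaited frame in the same task sees it.
trace_id_var: contextvars.ContextVar[str | None] = contextvars.ContextVar("trace_id", default=None)


async def tool_call() -> None:
    print("tool sees trace_id:", trace_id_var.get())


async def handle_request() -> None:
    trace_id_var.set(uuid.uuid4().hex)  # e.g. in HTTP middleware / WS handler
    await tool_call()                   # propagates without explicit plumbing


asyncio.run(handle_request())
```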
""" diff --git a/backend/app/main.py b/backend/app/main.py index 7ef2b5f0f..de28cad1a 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -2,29 +2,33 @@ FastAPI Main Application """ -import asyncio -from contextlib import asynccontextmanager -from datetime import datetime, timedelta, timezone -from typing import AsyncGenerator - -from fastapi import FastAPI, WebSocket, WebSocketDisconnect -from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import JSONResponse -from loguru import logger -from sqlalchemy import text - -from app.api import api_router -from app.api.v1.sessions import router as sessions_router -from app.common.exceptions import register_exception_handlers -from app.common.logging import LoggingMiddleware, setup_logging -from app.core.database import AsyncSessionLocal, close_db, engine -from app.core.redis import RedisClient -from app.core.settings import settings -from app.websocket.auth import WebSocketCloseCode, authenticate_websocket, reject_websocket -from app.websocket.chat_ws_handler import ChatWsHandler -from app.websocket.notification_manager import NotificationType, notification_manager -from app.websocket.openclaw_handler import openclaw_bridge_handler -from app.websocket.run_subscription_handler import run_subscription_handler +from __future__ import annotations + +import asyncio # noqa: E402 +from contextlib import asynccontextmanager # noqa: E402 +from typing import AsyncGenerator, Optional # noqa: E402 + +from dotenv import load_dotenv # noqa: E402 + +from app.core.settings import ENV_FILE # noqa: E402 + +load_dotenv(ENV_FILE, override=False) + +from fastapi import FastAPI, WebSocket, WebSocketDisconnect # noqa: E402 +from fastapi.middleware.cors import CORSMiddleware # noqa: E402 +from loguru import logger # noqa: E402 +from sqlalchemy import text # noqa: E402 + +from app.api import api_router # noqa: E402 +from app.common.exceptions import register_exception_handlers # noqa: E402 +from app.common.logging import LoggingMiddleware, setup_logging # noqa: E402 +from app.core.database import AsyncSessionLocal, close_db, engine # noqa: E402 +from app.core.redis import RedisClient # noqa: E402 +from app.core.settings import settings # noqa: E402 +from app.websocket.auth import WebSocketCloseCode, authenticate_websocket, reject_websocket # noqa: E402 +from app.websocket.execution_subscription_handler import execution_subscription_handler # noqa: E402 +from app.websocket.notification_manager import NotificationType, notification_manager # noqa: E402 +from app.websocket.openclaw_handler import openclaw_bridge_handler # noqa: E402 setup_logging() @@ -107,29 +111,8 @@ async def lifespan(app: FastAPI) -> AsyncGenerator: # Check Docker availability (non-blocking, just warn) await _check_docker_availability() - # Recover stale in-process durable runs that lost their executing runtime. 
- try: - from app.services.run_service import RunService - - stale_before = datetime.now(timezone.utc) - timedelta(seconds=settings.run_heartbeat_timeout_seconds) - async with RedisClient.lock("init:durable_run_recovery", timeout=60, blocking_timeout=60): - async with AsyncSessionLocal() as db: - run_service = RunService(db) - recovered_runs = await run_service.recover_stale_incomplete_runs( - runtime_owner_id=settings.run_runtime_instance_id, - stale_before=stale_before, - ) - if recovered_runs: - logger.warning( - f" ⚠️ Recovered {len(recovered_runs)} stale durable runs for runtime owner " - f"{settings.run_runtime_instance_id}" - ) - else: - logger.info( - f" ✓ Durable run recovery sweep completed for runtime owner {settings.run_runtime_instance_id}" - ) - except Exception as e: - logger.warning(f" ⚠️ Durable run recovery sweep failed: {e}") + # Durable run recovery is no longer needed. AgentRun lifecycle is now managed + # by AgentRunService which does not require startup recovery. # Automatically sync providers and models to database on startup (if not present) try: @@ -191,8 +174,86 @@ async def lifespan(app: FastAPI) -> AsyncGenerator: logger.warning(f" ⚠️ Checkpointer initialization failed: {e}") logger.warning(" App will continue starting, checkpoint features may be unavailable") + # Initialize CLI runtime providers + try: + from app.core.agent.cli_backends.registry import init_providers + + init_providers() + logger.info(" ✓ CLI runtime providers initialized") + except Exception as e: + logger.warning(f" ⚠️ CLI runtime provider initialization failed: {e}") + + # Register execution event bus subscribers + try: + from app.core.events.bus import execution_event_bus + from app.core.events.subscribers.persistence import PersistenceSubscriber + from app.core.events.subscribers.state_transition import StateTransitionSubscriber + from app.core.events.subscribers.task_sync import TaskSyncSubscriber + from app.core.events.subscribers.websocket import WebSocketSubscriber + + execution_event_bus.register(PersistenceSubscriber()) + execution_event_bus.register(StateTransitionSubscriber()) + execution_event_bus.register(WebSocketSubscriber()) + execution_event_bus.register(TaskSyncSubscriber()) + logger.info(" ✓ Execution event bus subscribers registered") + except Exception as e: + logger.error(f" ⚠️ Event bus subscriber registration failed: {e}") + + # Start container pool reaper (idle containers cleaned up every 5 min) + _reaper_task: Optional[asyncio.Task] = None + try: + from app.core.agent.cli_backends.container_pool import container_pool + + async def _container_reaper() -> None: + while True: + await asyncio.sleep(300) + try: + removed = await container_pool.cleanup_idle() + if removed: + logger.info(f"Container reaper: removed {removed} idle containers") + except Exception as e: + logger.warning(f"Container reaper error: {e}") + + _reaper_task = asyncio.create_task(_container_reaper(), name="container-reaper") + logger.info(" ✓ Container pool reaper started (idle_timeout=30m)") + except Exception as e: + logger.warning(f" ⚠️ Container pool reaper failed to start: {e}") + + # Scheduler: stale execution recovery (one-shot) + periodic loops + _dispatcher_task: Optional[asyncio.Task] = None + _exec_reaper_task: Optional[asyncio.Task] = None + try: + from app.core.scheduler import ( + execution_reaper_loop, + recover_stale_on_startup, + task_dispatcher_loop, + ) + + await recover_stale_on_startup() + _dispatcher_task = asyncio.create_task(task_dispatcher_loop(), name="task-dispatcher") + 
_exec_reaper_task = asyncio.create_task(execution_reaper_loop(), name="execution-reaper") + logger.info(" ✓ Task dispatcher and execution reaper started (interval=30s)") + except Exception as e: + logger.warning(f" ⚠️ Scheduler startup failed: {e}") + yield + # Shutdown: Cancel scheduler loops + for task in (_dispatcher_task, _exec_reaper_task): + if task: + task.cancel() + + # Shutdown: Cancel container pool reaper and mark pool as shut down + try: + if _reaper_task: + _reaper_task.cancel() + from app.core.agent.cli_backends.container_pool import container_pool as _cp + + await _cp.shutdown() + logger.info(" ✓ Container pool shut down (containers left running)") + except Exception as e: + logger.warning(f" ⚠️ Container pool shutdown failed: {e}") + # Shutdown: Drain sandbox pool (stop all containers gracefully) try: from app.services.sandbox_manager import _sandbox_pool @@ -254,21 +315,8 @@ async def lifespan(app: FastAPI) -> AsyncGenerator: ) -@app.exception_handler(Exception) -async def global_exception_handler(request, exc): - """Global exception handler""" - logger.opt(exception=True).error(f"Unhandled exception: {exc}") - return JSONResponse( - status_code=500, - content={"detail": "Internal server error"}, - ) - - app.include_router(api_router, prefix="/api") -# Sessions router mounted outside /api/v1 to keep /api/sessions path compatible -app.include_router(sessions_router, prefix="/api/sessions", tags=["sessions"]) - # Register Router @app.get("/", tags=["Root"]) @@ -282,19 +330,6 @@ async def root(): } -@app.websocket("/ws/chat") -async def chat_websocket_endpoint(websocket: WebSocket): - """Persistent WebSocket endpoint for Chat page streaming.""" - is_authenticated, user_id = await authenticate_websocket(websocket) - if not is_authenticated or not user_id: - await reject_websocket(websocket, code=WebSocketCloseCode.UNAUTHORIZED, reason="Authentication required") - return - - await websocket.accept() - handler = ChatWsHandler(user_id=str(user_id), websocket=websocket) - await handler.run() - - async def _run_notification_loop(websocket: WebSocket, user_id: str) -> None: """Shared ping/pong loop for notification WebSocket endpoints.""" import json @@ -335,16 +370,16 @@ async def notification_websocket_endpoint(websocket: WebSocket): await _run_notification_loop(websocket, user_id) -@app.websocket("/ws/runs") -async def runs_websocket_endpoint(websocket: WebSocket): - """Subscription endpoint for durable run snapshot/replay/live events.""" +@app.websocket("/ws/executions") +async def executions_websocket_endpoint(websocket: WebSocket): + """Subscription endpoint for CLI execution snapshot/replay/live events.""" is_authenticated, user_id = await authenticate_websocket(websocket) if not is_authenticated or not user_id: await reject_websocket(websocket, code=WebSocketCloseCode.UNAUTHORIZED, reason="Authentication required") return - await run_subscription_handler.handle_connection(websocket, str(user_id)) + await execution_subscription_handler.handle_connection(websocket, str(user_id)) @app.websocket("/ws/openclaw/dashboard") diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py index 761a6150f..a8959079e 100644 --- a/backend/app/models/__init__.py +++ b/backend/app/models/__init__.py @@ -2,7 +2,6 @@ Data models """ -from app.models.conversation import Conversation from app.models.message import Message from .access_control import ( @@ -11,12 +10,14 @@ WorkspaceInvitation, WorkspaceInvitationStatus, ) -from .agent_run import AgentRun, AgentRunEvent, 
AgentRunSnapshot, AgentRunStatus +from .agent import Agent, AgentRelease, AgentVersion +from .agent_run import AgentRun from .auth import AuthSession, AuthUser from .auth import AuthUser as User from .base import BaseModel, SoftDeleteMixin, TimestampMixin from .chat import Chat from .custom_tool import CustomTool +from .execution import Artifact, Execution, ExecutionEvent from .execution_trace import ( ExecutionObservation, ExecutionTrace, @@ -25,9 +26,6 @@ ObservationType, TraceStatus, ) -from .graph import AgentGraph, GraphEdge, GraphNode, GraphNodeSecret -from .graph_deployment_version import GraphDeploymentVersion -from .graph_execution import ExecutionStatus, GraphExecution from .mcp import McpServer from .memory import Memory from .model_credential import ModelCredential @@ -42,17 +40,15 @@ from .skill import Skill, SkillFile from .skill_collaborator import CollaboratorRole, SkillCollaborator from .skill_version import SkillVersion, SkillVersionFile +from .task import Task, TaskPriority, TaskStatus +from .task_activity import ActivityAuthorType, ActivityType, TaskActivity +from .thread import Thread from .user_sandbox import UserSandbox -from .workspace import Workspace, WorkspaceFolder, WorkspaceMember, WorkspaceMemberRole, WorkspaceStatus -from .workspace_files import WorkspaceFile, WorkspaceStoredFile +from .workspace import Workspace, WorkspaceMember, WorkspaceMemberRole, WorkspaceStatus __all__ = [ "BaseModel", "AgentRun", - "AgentRunEvent", - "AgentRunSnapshot", - "AgentRunStatus", - "Conversation", "Message", "TimestampMixin", "SoftDeleteMixin", @@ -64,7 +60,6 @@ "WorkspaceMember", "WorkspaceStatus", "WorkspaceMemberRole", - "WorkspaceFolder", "UserSandbox", "Chat", "Organization", @@ -76,17 +71,8 @@ "Environment", "WorkspaceEnvironment", "Settings", - "WorkspaceFile", - "WorkspaceStoredFile", "CustomTool", "McpServer", - "AgentGraph", - "GraphNode", - "GraphEdge", - "GraphNodeSecret", - "GraphDeploymentVersion", - "GraphExecution", - "ExecutionStatus", "ModelProvider", "ModelCredential", "ModelInstance", @@ -106,4 +92,17 @@ "SkillVersion", "SkillVersionFile", "PlatformToken", + "Task", + "TaskStatus", + "TaskPriority", + "Agent", + "AgentRelease", + "AgentVersion", + "Execution", + "ExecutionEvent", + "Artifact", + "TaskActivity", + "ActivityAuthorType", + "ActivityType", + "Thread", ] diff --git a/backend/app/models/agent.py b/backend/app/models/agent.py new file mode 100644 index 000000000..98f02d784 --- /dev/null +++ b/backend/app/models/agent.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import TYPE_CHECKING, List, Optional + +from sqlalchemy import DateTime, Enum, ForeignKey, Index, Integer, String, Text, UniqueConstraint, func +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.core.contracts.agent import normalize_engine_kind, normalize_runtime_kind +from app.core.database import Base +from app.utils.datetime import utc_now + +from .base import BaseModel + +if TYPE_CHECKING: + pass + + +class Agent(BaseModel): + """An agent owned by a workspace.""" + + __tablename__ = "agents" + __table_args__ = ( + UniqueConstraint("workspace_id", "slug", name="uq_agents_workspace_slug"), + Index("ix_agents_workspace_id", "workspace_id"), + Index("ix_agents_status", "status"), + ) + + workspace_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="CASCADE"), nullable=False + ) + name: 
Mapped[str] = mapped_column(String(255), nullable=False) + slug: Mapped[str] = mapped_column(String(255), nullable=False) + description: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + avatar: Mapped[Optional[str]] = mapped_column(String(500), nullable=True) + status: Mapped[str] = mapped_column( + Enum("draft", "active", "archived", name="agent_status"), nullable=False, default="draft" + ) + current_draft_version_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), ForeignKey("agent_versions.id"), nullable=True + ) + active_release_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), ForeignKey("agent_releases.id"), nullable=True + ) + created_by: Mapped[str] = mapped_column(String(255), ForeignKey("user.id", ondelete="CASCADE"), nullable=False) + encrypted_custom_env: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + @property + def has_custom_env(self) -> bool: + return self.encrypted_custom_env is not None + + @property + def engine_kind(self) -> Optional[str]: + if not self.current_draft_version: + return None + return normalize_engine_kind(self.current_draft_version.engine_kind) + + @property + def runtime_kind(self) -> Optional[str]: + if not self.active_release: + return None + return normalize_runtime_kind(self.active_release.runtime_kind) + + # Relationships + versions: Mapped[List[AgentVersion]] = relationship( + "AgentVersion", + back_populates="agent", + foreign_keys="AgentVersion.agent_id", + ) + current_draft_version: Mapped[Optional[AgentVersion]] = relationship( + "AgentVersion", + foreign_keys=[current_draft_version_id], + ) + active_release: Mapped[Optional[AgentRelease]] = relationship( + "AgentRelease", + foreign_keys=[active_release_id], + ) + + +class AgentVersion(Base): + """An immutable snapshot of an agent's configuration at a point in time.""" + + __tablename__ = "agent_versions" + __table_args__ = (UniqueConstraint("agent_id", "version_number", name="uq_agent_versions_agent_version"),) + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + agent_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), ForeignKey("agents.id", ondelete="CASCADE"), nullable=False + ) + version_number: Mapped[int] = mapped_column(Integer, nullable=False) + status: Mapped[str] = mapped_column( + Enum("draft", "frozen", name="agent_version_status"), nullable=False, default="draft" + ) + source_kind: Mapped[str] = mapped_column(String(20), nullable=False, default="manual") + engine_kind: Mapped[str] = mapped_column(String(20), nullable=False) + definition_payload: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) + capability_manifest: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) + changelog: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + created_by: Mapped[str] = mapped_column(String(255), ForeignKey("user.id", ondelete="CASCADE"), nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), default=utc_now, server_default=func.now(), nullable=False + ) + + # Relationships + agent: Mapped[Agent] = relationship( + "Agent", + back_populates="versions", + foreign_keys=[agent_id], + ) + + +class AgentRelease(Base): + """A release artifact built from a specific agent version.""" + + __tablename__ = "agent_releases" + __table_args__ = (UniqueConstraint("agent_version_id", "release_number", name="uq_agent_releases_version_number"),) + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), 
primary_key=True, default=uuid.uuid4) + agent_version_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), ForeignKey("agent_versions.id"), nullable=False + ) + release_number: Mapped[int] = mapped_column(Integer, nullable=False) + status: Mapped[str] = mapped_column( + Enum("ready", "active", "superseded", "failed", "retired", name="agent_release_status"), + nullable=False, + default="ready", + ) + runtime_kind: Mapped[str] = mapped_column(String(20), nullable=False) + builder_kind: Mapped[Optional[str]] = mapped_column(String(20), nullable=True) + executable_ref: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) + runtime_binding: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) + published_by: Mapped[Optional[str]] = mapped_column(String(255), ForeignKey("user.id"), nullable=True) + published_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + retired_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + + # Relationships + version: Mapped[AgentVersion] = relationship("AgentVersion") diff --git a/backend/app/models/agent_run.py b/backend/app/models/agent_run.py index 3bdb0ef3f..c3f20f14e 100644 --- a/backend/app/models/agent_run.py +++ b/backend/app/models/agent_run.py @@ -1,118 +1,61 @@ -""" -Agent run persistence models. -""" +from __future__ import annotations -import enum import uuid from datetime import datetime -from typing import Optional +from typing import TYPE_CHECKING, List, Optional -from sqlalchemy import BigInteger, DateTime, Enum, ForeignKey, Index, String, Text, UniqueConstraint +from sqlalchemy import CheckConstraint, DateTime, Enum, ForeignKey, String, Text from sqlalchemy.dialects.postgresql import JSONB, UUID -from sqlalchemy.orm import Mapped, mapped_column +from sqlalchemy.orm import Mapped, mapped_column, relationship from app.core.database import Base from app.utils.datetime import utc_now -from .base import BaseModel, TimestampMixin +if TYPE_CHECKING: + from .agent import AgentRelease, AgentVersion + from .execution import Execution -class AgentRunStatus(str, enum.Enum): - QUEUED = "queued" - RUNNING = "running" - INTERRUPT_WAIT = "interrupt_wait" - COMPLETED = "completed" - FAILED = "failed" - CANCELLED = "cancelled" - - -class AgentRun(BaseModel): - """Durable long-running task record.""" - +class AgentRun(Base): __tablename__ = "agent_runs" - - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - workspace_id: Mapped[Optional[uuid.UUID]] = mapped_column( - UUID(as_uuid=True), - ForeignKey("workspaces.id", ondelete="SET NULL"), - nullable=True, - ) - graph_id: Mapped[Optional[uuid.UUID]] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="SET NULL"), - nullable=True, - ) - thread_id: Mapped[Optional[str]] = mapped_column(String(100), nullable=True) - - run_type: Mapped[str] = mapped_column(String(100), nullable=False) - agent_name: Mapped[str] = mapped_column(String(100), nullable=False) - source: Mapped[str] = mapped_column(String(100), nullable=False) - status: Mapped[AgentRunStatus] = mapped_column( - Enum(AgentRunStatus, values_callable=lambda e: [m.value for m in e], name="agentrunstatus"), - nullable=False, - default=AgentRunStatus.QUEUED, - ) - - title: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) - request_payload: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) - result_summary: Mapped[Optional[dict]] = 
mapped_column(JSONB, nullable=True) - error_code: Mapped[Optional[str]] = mapped_column(String(100), nullable=True) - error_message: Mapped[Optional[str]] = mapped_column(Text, nullable=True) - runtime_owner_id: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) - last_heartbeat_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) - - trace_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), nullable=True) - started_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, default=utc_now) - finished_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) - last_seq: Mapped[int] = mapped_column(BigInteger, nullable=False, default=0) - __table_args__ = ( - Index("agent_runs_user_created_idx", "user_id", "created_at"), - Index("agent_runs_thread_created_idx", "thread_id", "created_at"), - Index("agent_runs_graph_created_idx", "graph_id", "created_at"), - Index("agent_runs_status_updated_idx", "status", "updated_at"), - Index("agent_runs_agent_updated_idx", "agent_name", "updated_at"), - Index("agent_runs_owner_status_idx", "runtime_owner_id", "status"), - ) - - -class AgentRunEvent(BaseModel): - """Append-only ordered event stream for a run.""" - - __tablename__ = "agent_run_events" - - run_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("agent_runs.id", ondelete="CASCADE"), + CheckConstraint( + "(release_id IS NOT NULL) <> (agent_version_id IS NOT NULL)", + name="ck_agent_runs_release_or_version", + ), + ) + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + release_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), ForeignKey("agent_releases.id"), nullable=True + ) + agent_version_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), ForeignKey("agent_versions.id"), nullable=True + ) + workspace_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), ForeignKey("workspaces.id"), nullable=False) + thread_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), ForeignKey("threads.id"), nullable=True) + task_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), ForeignKey("tasks.id"), nullable=True) + trigger_medium: Mapped[str] = mapped_column(String(20), nullable=False) + run_purpose: Mapped[str] = mapped_column(String(20), nullable=False) + goal: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + input_payload: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) + status: Mapped[str] = mapped_column( + Enum("pending", "running", "succeeded", "failed", "cancelled", name="agent_run_status"), nullable=False, + default="pending", ) - seq: Mapped[int] = mapped_column(BigInteger, nullable=False) - event_type: Mapped[str] = mapped_column(String(100), nullable=False) - payload: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) - trace_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), nullable=True) - observation_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), nullable=True) - parent_observation_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), nullable=True) - - __table_args__ = ( - UniqueConstraint("run_id", "seq", name="uq_agent_run_events_run_seq"), - Index("agent_run_events_run_created_idx", "run_id", "created_at"), + current_execution_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), ForeignKey("executions.id"), nullable=True ) + 
result_summary: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + started_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + ended_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + created_by: Mapped[Optional[str]] = mapped_column(String(255), ForeignKey("user.id"), nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) - -class AgentRunSnapshot(Base, TimestampMixin): - """Latest UI projection for a run.""" - - __tablename__ = "agent_run_snapshots" - - run_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("agent_runs.id", ondelete="CASCADE"), - primary_key=True, + release: Mapped[Optional["AgentRelease"]] = relationship("AgentRelease") + agent_version: Mapped[Optional["AgentVersion"]] = relationship("AgentVersion") + current_execution: Mapped[Optional["Execution"]] = relationship("Execution", foreign_keys=[current_execution_id]) + executions: Mapped[List["Execution"]] = relationship( + "Execution", back_populates="run", foreign_keys="Execution.run_id" ) - last_seq: Mapped[int] = mapped_column(BigInteger, nullable=False, default=0) - status: Mapped[str] = mapped_column(String(100), nullable=False) - projection: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) diff --git a/backend/app/models/conversation.py b/backend/app/models/conversation.py deleted file mode 100644 index d89bd8beb..000000000 --- a/backend/app/models/conversation.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -Conversation model - -Manage conversations for the LangGraph dialogue system. -""" - -from typing import TYPE_CHECKING - -from sqlalchemy import JSON, ForeignKey, Integer, String -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from app.models.base import BaseModel, SoftDeleteMixin - -if TYPE_CHECKING: - from app.models.message import Message - - -class Conversation(BaseModel, SoftDeleteMixin): - """Conversation table -- store dialogue session information. 
- - Inherit from BaseTableMixin with the following columns: - - id: primary key - - create_by: creator - - update_by: updater - - create_time: creation timestamp - - update_time: update timestamp - - deleted: soft-delete flag - """ - - __tablename__ = "conversations" - - thread_id: Mapped[str] = mapped_column(String(100), unique=True, index=True, nullable=False, comment="thread ID") - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - index=True, - nullable=False, - comment="user ID (text)", - ) - title: Mapped[str] = mapped_column(String(200), nullable=False, comment="conversation title") - meta_data: Mapped[dict] = mapped_column(JSON, nullable=True, default=dict, comment="metadata") - is_active: Mapped[int] = mapped_column(Integer, nullable=False, default=1, comment="active flag (0=no, 1=yes)") - - # relationship: a conversation has many messages; cascade delete on conversation removal - messages: Mapped[list["Message"]] = relationship( - "Message", - cascade="all, delete-orphan", - passive_deletes=True, - lazy="selectin", - ) - - def __repr__(self) -> str: - return f"" diff --git a/backend/app/models/execution.py b/backend/app/models/execution.py new file mode 100644 index 000000000..4bacf7a2f --- /dev/null +++ b/backend/app/models/execution.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import TYPE_CHECKING, List, Optional + +from sqlalchemy import DateTime, Enum, ForeignKey, Integer, String, Text, UniqueConstraint +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.core.database import Base +from app.utils.datetime import utc_now + +if TYPE_CHECKING: + from .agent_run import AgentRun + + +class Execution(Base): + __tablename__ = "executions" + __table_args__ = (UniqueConstraint("run_id", "attempt_index", name="uq_executions_run_attempt"),) + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + run_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), ForeignKey("agent_runs.id"), nullable=False) + parent_execution_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), ForeignKey("executions.id"), nullable=True + ) + attempt_index: Mapped[int] = mapped_column(Integer, nullable=False, default=1) + engine_kind: Mapped[str] = mapped_column(String(20), nullable=False) + runtime_session_ref: Mapped[Optional[str]] = mapped_column(String(500), nullable=True) + status: Mapped[str] = mapped_column( + Enum( + "pending", + "dispatched", + "running", + "approval_wait", + "succeeded", + "failed", + "cancelled", + name="execution_status", + ), + nullable=False, + default="pending", + ) + error: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) + metrics: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) + started_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + ended_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + + run: Mapped["AgentRun"] = relationship("AgentRun", back_populates="executions", foreign_keys=[run_id]) + events: Mapped[List["ExecutionEvent"]] = relationship("ExecutionEvent", back_populates="execution") + children: Mapped[List["Execution"]] = relationship("Execution", foreign_keys=[parent_execution_id]) + artifacts: 
Mapped[List["Artifact"]] = relationship("Artifact", back_populates="execution") + + +class ExecutionEvent(Base): + __tablename__ = "execution_events" + __table_args__ = (UniqueConstraint("execution_id", "sequence_no", name="uq_execution_events_seq"),) + + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + execution_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), ForeignKey("executions.id"), nullable=False) + sequence_no: Mapped[int] = mapped_column(Integer, nullable=False) + event_type: Mapped[str] = mapped_column(String(50), nullable=False) + payload: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + + execution: Mapped["Execution"] = relationship("Execution", back_populates="events") + + +class Artifact(Base): + __tablename__ = "artifacts" + id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + execution_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), ForeignKey("executions.id"), nullable=False) + kind: Mapped[str] = mapped_column(String(50), nullable=False) + uri: Mapped[str] = mapped_column(Text, nullable=False) + metadata_: Mapped[Optional[dict]] = mapped_column("metadata", JSONB, nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + execution: Mapped["Execution"] = relationship("Execution", back_populates="artifacts") diff --git a/backend/app/models/graph.py b/backend/app/models/graph.py deleted file mode 100644 index 753cc3a73..000000000 --- a/backend/app/models/graph.py +++ /dev/null @@ -1,217 +0,0 @@ -""" -Graph models -""" - -import uuid -from datetime import datetime -from typing import TYPE_CHECKING, List, Optional - -from sqlalchemy import ( - Boolean, - DateTime, - ForeignKey, - Index, - Numeric, - String, - Text, -) -from sqlalchemy.dialects.postgresql import JSONB, UUID -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from .base import BaseModel, SoftDeleteMixin - -if TYPE_CHECKING: - from .auth import AuthUser - from .graph_deployment_version import GraphDeploymentVersion - from .workspace import Workspace, WorkspaceFolder - - -class AgentGraph(BaseModel, SoftDeleteMixin): - """Agent graph model (supports soft-delete).""" - - __tablename__ = "graphs" - - name: Mapped[str] = mapped_column(String(200), nullable=False) - description: Mapped[Optional[str]] = mapped_column(String(2000), nullable=True) - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - workspace_id: Mapped[Optional[uuid.UUID]] = mapped_column( - UUID(as_uuid=True), - ForeignKey("workspaces.id", ondelete="SET NULL"), - nullable=True, - ) - folder_id: Mapped[Optional[uuid.UUID]] = mapped_column( - UUID(as_uuid=True), - ForeignKey("workspace_folder.id", ondelete="SET NULL"), - nullable=True, - ) - parent_id: Mapped[Optional[uuid.UUID]] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="SET NULL"), - nullable=True, - ) - color: Mapped[Optional[str]] = mapped_column(String(2000), nullable=True) - is_deployed: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) - variables: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) - - # deployment fields — correspond to the sim project's workflow table - deployed_at: Mapped[Optional[datetime]] = mapped_column( - 
DateTime(timezone=True), - nullable=True, - default=None, - ) - - # relationships - user: Mapped["AuthUser"] = relationship("AuthUser", lazy="selectin") - workspace: Mapped[Optional["Workspace"]] = relationship("Workspace", lazy="selectin") - folder: Mapped[Optional["WorkspaceFolder"]] = relationship( - "WorkspaceFolder", - lazy="selectin", - ) - parent: Mapped[Optional["AgentGraph"]] = relationship( - "AgentGraph", - remote_side="AgentGraph.id", - lazy="selectin", - ) - nodes: Mapped[List["GraphNode"]] = relationship( - "GraphNode", - back_populates="graph", - cascade="all, delete-orphan", - ) - edges: Mapped[List["GraphEdge"]] = relationship( - "GraphEdge", - back_populates="graph", - cascade="all, delete-orphan", - ) - deployment_versions: Mapped[List["GraphDeploymentVersion"]] = relationship( - "GraphDeploymentVersion", - back_populates="graph", - cascade="all, delete-orphan", - order_by="GraphDeploymentVersion.version.desc()", - ) - __table_args__ = ( - Index("graphs_user_id_idx", "user_id"), - Index("graphs_workspace_id_idx", "workspace_id"), - Index("graphs_folder_id_idx", "folder_id"), - Index("graphs_parent_id_idx", "parent_id"), - ) - - -class GraphNode(BaseModel): - """Graph node model.""" - - __tablename__ = "graph_nodes" - - graph_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="CASCADE"), - nullable=False, - ) - position_x: Mapped[float] = mapped_column(Numeric, nullable=False) - position_y: Mapped[float] = mapped_column(Numeric, nullable=False) - position_absolute_x: Mapped[Optional[float]] = mapped_column(Numeric, nullable=True) - position_absolute_y: Mapped[Optional[float]] = mapped_column(Numeric, nullable=True) - width: Mapped[float] = mapped_column(Numeric, nullable=False, default=0) - height: Mapped[float] = mapped_column(Numeric, nullable=False, default=0) - data: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) - type: Mapped[str] = mapped_column(String(50), nullable=False) - - # relationship - graph: Mapped["AgentGraph"] = relationship("AgentGraph", back_populates="nodes", lazy="selectin") - source_edges: Mapped[List["GraphEdge"]] = relationship( - "GraphEdge", - foreign_keys="GraphEdge.source_node_id", - back_populates="source_node", - cascade="all, delete-orphan", - ) - target_edges: Mapped[List["GraphEdge"]] = relationship( - "GraphEdge", - foreign_keys="GraphEdge.target_node_id", - back_populates="target_node", - cascade="all, delete-orphan", - ) - - __table_args__ = ( - Index("graph_nodes_graph_id_idx", "graph_id"), - Index("graph_nodes_type_idx", "type"), - ) - - -class GraphEdge(BaseModel): - """Graph edge model. - - Support conditional routing and complex flow patterns: - - data.route_key: routing key for conditional routing (maps to RouterNodeExecutor return value) - - data.source_handle_id: React Flow Handle ID (e.g. 
"Yes", "No", "Unknown") - - data.condition_expression: edge-level condition expression (optional) - - data.edge_type: edge type ("normal" | "conditional" | "loop_back") to distinguish edge kinds - - data.label: display label for the edge (optional), used for logging and debugging - """ - - __tablename__ = "graph_edges" - - graph_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="CASCADE"), - nullable=False, - ) - source_node_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graph_nodes.id", ondelete="CASCADE"), - nullable=False, - ) - target_node_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graph_nodes.id", ondelete="CASCADE"), - nullable=False, - ) - # edge metadata storing routing info - # structure: { "route_key": str, "source_handle_id": str, "condition_expression": str } - data: Mapped[dict] = mapped_column(JSONB, nullable=False, default=dict) - - # relationships - graph: Mapped["AgentGraph"] = relationship("AgentGraph", back_populates="edges", lazy="selectin") - source_node: Mapped["GraphNode"] = relationship( - "GraphNode", - foreign_keys=[source_node_id], - back_populates="source_edges", - lazy="selectin", - ) - target_node: Mapped["GraphNode"] = relationship( - "GraphNode", - foreign_keys=[target_node_id], - back_populates="target_edges", - lazy="selectin", - ) - - __table_args__ = ( - Index("graph_edges_graph_id_idx", "graph_id"), - Index("graph_edges_source_node_id_idx", "source_node_id"), - Index("graph_edges_target_node_id_idx", "target_node_id"), - Index("graph_edges_graph_source_idx", "graph_id", "source_node_id"), - Index("graph_edges_graph_target_idx", "graph_id", "target_node_id"), - ) - - -class GraphNodeSecret(BaseModel): - """Encrypted secrets for graph nodes (e.g. a2a_auth_headers). Not stored in node.data JSONB.""" - - __tablename__ = "graph_node_secrets" - - graph_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="CASCADE"), - nullable=False, - ) - node_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graph_nodes.id", ondelete="CASCADE"), - nullable=False, - ) - key_slug: Mapped[str] = mapped_column(String(64), nullable=False, default="a2a_auth_headers") - encrypted_value: Mapped[str] = mapped_column(Text, nullable=False) - - __table_args__ = (Index("graph_node_secrets_graph_node_idx", "graph_id", "node_id"),) diff --git a/backend/app/models/graph_deployment_version.py b/backend/app/models/graph_deployment_version.py deleted file mode 100644 index 430863160..000000000 --- a/backend/app/models/graph_deployment_version.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Graph deployment version model -""" - -import uuid -from datetime import datetime -from typing import TYPE_CHECKING, Optional - -from sqlalchemy import Boolean, DateTime, ForeignKey, Index, Integer, String, UniqueConstraint, func -from sqlalchemy.dialects.postgresql import JSONB, UUID -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from .base import BaseModel - -if TYPE_CHECKING: - from .graph import AgentGraph - - -from app.utils.datetime import utc_now - - -class GraphDeploymentVersion(BaseModel): - """Agent Graph deployment version. 
- - - Each deployment creates a new version with an auto-incrementing version number - - Only one version is active at a time (is_active=True) - - Store a full graph state snapshot (nodes + edges + variables) - """ - - __tablename__ = "graph_deployment_version" - - graph_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="CASCADE"), - nullable=False, - ) - - version: Mapped[int] = mapped_column(Integer, nullable=False) - - name: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) - - state: Mapped[dict] = mapped_column(JSONB, nullable=False) - - is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) - - created_at: Mapped[datetime] = mapped_column( - DateTime(timezone=True), - default=utc_now, - server_default=func.now(), - nullable=False, - ) - - created_by: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) - - graph: Mapped["AgentGraph"] = relationship( - "AgentGraph", - back_populates="deployment_versions", - lazy="selectin", - ) - - __table_args__ = ( - UniqueConstraint("graph_id", "version", name="graph_deployment_version_graph_version_unique"), - Index("graph_deployment_version_graph_active_idx", "graph_id", "is_active"), - Index("graph_deployment_version_created_at_idx", "created_at"), - ) diff --git a/backend/app/models/graph_execution.py b/backend/app/models/graph_execution.py deleted file mode 100644 index d1d0ff9ca..000000000 --- a/backend/app/models/graph_execution.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -GraphExecution model -- track Graph executions triggered via OpenAPI. -""" - -import enum -import uuid -from datetime import datetime -from typing import Optional - -from sqlalchemy import DateTime, Enum, ForeignKey, Index, String, Text -from sqlalchemy.dialects.postgresql import JSONB, UUID -from sqlalchemy.orm import Mapped, mapped_column - -from .base import BaseModel - - -class ExecutionStatus(str, enum.Enum): - INIT = "init" - EXECUTING = "executing" - FINISH = "finish" - FAILED = "failed" - - -class GraphExecution(BaseModel): - """OpenAPI Graph execution record.""" - - __tablename__ = "graph_executions" - - graph_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("graphs.id", ondelete="CASCADE"), - nullable=False, - ) - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - - status: Mapped[ExecutionStatus] = mapped_column( - Enum(ExecutionStatus, values_callable=lambda e: [m.value for m in e]), - default=ExecutionStatus.INIT, - nullable=False, - ) - input_variables: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) - output: Mapped[Optional[dict]] = mapped_column(JSONB, nullable=True) - error_message: Mapped[Optional[str]] = mapped_column(Text, nullable=True) - - started_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) - finished_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) - - __table_args__ = ( - Index("graph_executions_graph_id_idx", "graph_id"), - Index("graph_executions_user_id_idx", "user_id"), - Index("graph_executions_status_idx", "status"), - ) diff --git a/backend/app/models/task.py b/backend/app/models/task.py new file mode 100644 index 000000000..5b762ab0b --- /dev/null +++ b/backend/app/models/task.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import enum +import uuid +from datetime import datetime +from typing import TYPE_CHECKING, Optional + +from sqlalchemy import 
Boolean, DateTime, Enum, Float, ForeignKey, Index, String, Text +from sqlalchemy.dialects.postgresql import JSONB, UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from .base import BaseModel + +if TYPE_CHECKING: + from .agent import Agent + from .agent_run import AgentRun + + +class TaskStatus(str, enum.Enum): + BACKLOG = "backlog" + TODO = "todo" + IN_PROGRESS = "in_progress" + IN_REVIEW = "in_review" + DONE = "done" + CANCELLED = "cancelled" + + +class TaskPriority(str, enum.Enum): + NONE = "none" + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + URGENT = "urgent" + + +class Task(BaseModel): + __tablename__ = "tasks" + + workspace_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("workspaces.id", ondelete="CASCADE"), + nullable=False, + ) + title: Mapped[str] = mapped_column(String(500), nullable=False) + description: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + goal: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + status: Mapped[TaskStatus] = mapped_column( + Enum(TaskStatus, values_callable=lambda e: [m.value for m in e], name="taskstatus"), + nullable=False, + default=TaskStatus.BACKLOG, + ) + priority: Mapped[TaskPriority] = mapped_column( + Enum(TaskPriority, values_callable=lambda e: [m.value for m in e], name="taskpriority"), + nullable=False, + default=TaskPriority.NONE, + ) + + agent_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), + ForeignKey("agents.id"), + nullable=True, + ) + creator_id: Mapped[str] = mapped_column( + String(255), + ForeignKey("user.id", ondelete="CASCADE"), + nullable=False, + ) + parent_task_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tasks.id", ondelete="SET NULL"), + nullable=True, + ) + latest_run_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), + ForeignKey("agent_runs.id"), + nullable=True, + ) + + due_date: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + position: Mapped[float] = mapped_column(Float, nullable=False, default=0.0) + tags: Mapped[Optional[list]] = mapped_column(JSONB, nullable=True) + auto_approve: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) + + # Relationships + agent: Mapped[Optional["Agent"]] = relationship("Agent") + latest_run: Mapped[Optional["AgentRun"]] = relationship("AgentRun", foreign_keys=[latest_run_id]) + + __table_args__ = ( + Index("tasks_workspace_status_idx", "workspace_id", "status"), + Index("tasks_agent_idx", "agent_id"), + Index("tasks_creator_idx", "creator_id", "created_at"), + ) diff --git a/backend/app/models/task_activity.py b/backend/app/models/task_activity.py new file mode 100644 index 000000000..7e70353ff --- /dev/null +++ b/backend/app/models/task_activity.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import enum +import uuid +from typing import Optional + +from sqlalchemy import Enum, ForeignKey, Index, String, Text +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column + +from .base import BaseModel + + +class ActivityAuthorType(str, enum.Enum): + MEMBER = "member" + AGENT = "agent" + + +class ActivityType(str, enum.Enum): + COMMENT = "comment" + STATUS_CHANGE = "status_change" + PROGRESS_UPDATE = "progress_update" + SYSTEM = "system" + + +class TaskActivity(BaseModel): + __tablename__ = "task_activities" + + task_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("tasks.id", ondelete="CASCADE"), + 
nullable=False, + ) + workspace_id: Mapped[uuid.UUID] = mapped_column( + UUID(as_uuid=True), + ForeignKey("workspaces.id", ondelete="CASCADE"), + nullable=False, + ) + author_type: Mapped[ActivityAuthorType] = mapped_column( + Enum(ActivityAuthorType, values_callable=lambda e: [m.value for m in e], name="activityauthortype"), + nullable=False, + ) + author_id: Mapped[str] = mapped_column(String(255), nullable=False) + content: Mapped[str] = mapped_column(Text, nullable=False) + type: Mapped[ActivityType] = mapped_column( + Enum(ActivityType, values_callable=lambda e: [m.value for m in e], name="activitytype"), + nullable=False, + default=ActivityType.COMMENT, + ) + parent_activity_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID(as_uuid=True), + ForeignKey("task_activities.id", ondelete="SET NULL"), + nullable=True, + ) + + __table_args__ = ( + Index("task_activities_task_created_idx", "task_id", "created_at"), + Index("task_activities_workspace_idx", "workspace_id"), + Index("task_activities_author_idx", "author_type", "author_id"), + Index("task_activities_parent_idx", "parent_activity_id"), + ) diff --git a/backend/app/models/thread.py b/backend/app/models/thread.py new file mode 100644 index 000000000..a266cc72a --- /dev/null +++ b/backend/app/models/thread.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import uuid +from typing import TYPE_CHECKING, Optional + +from sqlalchemy import ForeignKey, String +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from .base import BaseModel + +if TYPE_CHECKING: + from .agent import Agent + + +class Thread(BaseModel): + """A conversation thread between a user and an agent.""" + + __tablename__ = "threads" + + agent_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), ForeignKey("agents.id"), nullable=False) + workspace_id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), ForeignKey("workspaces.id"), nullable=False) + title: Mapped[Optional[str]] = mapped_column(String(500), nullable=True) + status: Mapped[str] = mapped_column(String(20), nullable=False, default="active") + created_by: Mapped[str] = mapped_column(String(255), ForeignKey("user.id"), nullable=False) + + # Relationships + agent: Mapped[Agent] = relationship("Agent") diff --git a/backend/app/models/workspace.py b/backend/app/models/workspace.py index d89244ea2..b24519d1e 100644 --- a/backend/app/models/workspace.py +++ b/backend/app/models/workspace.py @@ -6,7 +6,7 @@ from enum import Enum as PyEnum from typing import TYPE_CHECKING, List, Optional -from sqlalchemy import Boolean, Enum, ForeignKey, Index, Integer, String, Text, UniqueConstraint +from sqlalchemy import Enum, ForeignKey, String, Text, UniqueConstraint from sqlalchemy.dialects.postgresql import JSONB, UUID from sqlalchemy.orm import Mapped, mapped_column, relationship @@ -115,45 +115,3 @@ class WorkspaceMember(BaseModel): ) __table_args__ = (UniqueConstraint("workspace_id", "user_id", name="uq_workspace_member"),) - - -class WorkspaceFolder(BaseModel, SoftDeleteMixin): - """Workspace folder.""" - - __tablename__ = "workspace_folder" - - name: Mapped[str] = mapped_column(String(255), nullable=False) - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - workspace_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("workspaces.id", ondelete="CASCADE"), - nullable=False, - ) - parent_id: Mapped[Optional[uuid.UUID]] = mapped_column( - 
UUID(as_uuid=True), - ForeignKey("workspace_folder.id", ondelete="SET NULL"), - nullable=True, - ) - - color: Mapped[Optional[str]] = mapped_column(String(32), nullable=True, default="#6B7280") - is_expanded: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) - sort_order: Mapped[int] = mapped_column(Integer, nullable=False, default=0) - - user: Mapped["AuthUser"] = relationship("AuthUser", lazy="selectin") - workspace: Mapped["Workspace"] = relationship("Workspace", lazy="selectin") - parent: Mapped[Optional["WorkspaceFolder"]] = relationship( - "WorkspaceFolder", - remote_side="WorkspaceFolder.id", - lazy="selectin", - ) - - __table_args__ = ( - Index("workspace_folder_user_idx", "user_id"), - Index("workspace_folder_workspace_parent_idx", "workspace_id", "parent_id"), - Index("workspace_folder_parent_sort_idx", "parent_id", "sort_order"), - Index("workspace_folder_deleted_at_idx", "deleted_at"), - ) diff --git a/backend/app/models/workspace_files.py b/backend/app/models/workspace_files.py deleted file mode 100644 index ab11daca3..000000000 --- a/backend/app/models/workspace_files.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -File storage models -""" - -import uuid -from datetime import datetime -from typing import TYPE_CHECKING, Optional - -from sqlalchemy import ForeignKey, Index, Integer, String, func -from sqlalchemy.dialects.postgresql import UUID -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from .base import BaseModel, utc_now - -if TYPE_CHECKING: - from .auth import AuthUser - from .workspace import Workspace - - -class WorkspaceFile(BaseModel): - """Workspace file (legacy/simplified table).""" - - __tablename__ = "workspace_file" - - workspace_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("workspaces.id", ondelete="CASCADE"), - nullable=False, - ) - name: Mapped[str] = mapped_column(String(255), nullable=False) - key: Mapped[str] = mapped_column(String(512), nullable=False, unique=True) - size: Mapped[int] = mapped_column(Integer, nullable=False) - type: Mapped[str] = mapped_column(String(100), nullable=False) - uploaded_by: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - uploaded_at: Mapped[datetime] = mapped_column( - nullable=False, - default=utc_now, - server_default=func.now(), - ) - - workspace: Mapped["Workspace"] = relationship("Workspace", lazy="selectin") - uploader: Mapped["AuthUser"] = relationship("AuthUser", lazy="selectin") - - __table_args__ = ( - Index("workspace_file_workspace_id_idx", "workspace_id"), - Index("workspace_file_key_idx", "key"), - ) - - -class WorkspaceStoredFile(BaseModel): - """Unified file storage table (multi-context).""" - - __tablename__ = "workspace_files" - - key: Mapped[str] = mapped_column(String(512), nullable=False, unique=True) - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - workspace_id: Mapped[Optional[uuid.UUID]] = mapped_column( - UUID(as_uuid=True), - ForeignKey("workspaces.id", ondelete="CASCADE"), - nullable=True, - ) - context: Mapped[str] = mapped_column(String(50), nullable=False) - original_name: Mapped[str] = mapped_column(String(255), nullable=False) - content_type: Mapped[str] = mapped_column(String(255), nullable=False) - size: Mapped[int] = mapped_column(Integer, nullable=False) - uploaded_at: Mapped[datetime] = mapped_column( - nullable=False, - default=utc_now, - server_default=func.now(), - ) - - user: 
Mapped["AuthUser"] = relationship("AuthUser", lazy="selectin") - workspace: Mapped[Optional["Workspace"]] = relationship("Workspace", lazy="selectin") - - __table_args__ = ( - Index("workspace_files_key_idx", "key"), - Index("workspace_files_user_id_idx", "user_id"), - Index("workspace_files_workspace_id_idx", "workspace_id"), - Index("workspace_files_context_idx", "context"), - ) diff --git a/backend/app/repositories/__init__.py b/backend/app/repositories/__init__.py index 0dd6bdf04..aeca8f571 100644 --- a/backend/app/repositories/__init__.py +++ b/backend/app/repositories/__init__.py @@ -5,8 +5,6 @@ from .auth_session import AuthSessionRepository from .auth_user import AuthUserRepository from .base import BaseRepository -from .graph import GraphEdgeRepository, GraphNodeRepository, GraphRepository -from .graph_deployment_version import GraphDeploymentVersionRepository from .mcp_server import McpServerRepository from .user import UserRepository @@ -15,9 +13,5 @@ "UserRepository", "AuthUserRepository", "AuthSessionRepository", - "GraphRepository", - "GraphNodeRepository", - "GraphEdgeRepository", - "GraphDeploymentVersionRepository", "McpServerRepository", ] diff --git a/backend/app/repositories/agent.py b/backend/app/repositories/agent.py new file mode 100644 index 000000000..29d60da2f --- /dev/null +++ b/backend/app/repositories/agent.py @@ -0,0 +1,59 @@ +""" +Repositories for Agent and AgentVersion. +""" + +from __future__ import annotations + +import uuid +from typing import List, Optional + +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import selectinload + +from app.models.agent import Agent, AgentVersion + +from .base import BaseRepository + + +class AgentRepository(BaseRepository[Agent]): + def __init__(self, db: AsyncSession): + super().__init__(Agent, db) + + async def get_by_workspace_and_slug(self, workspace_id: uuid.UUID, slug: str) -> Optional[Agent]: + query = select(Agent).where( + Agent.workspace_id == workspace_id, + Agent.slug == slug, + ) + result = await self.db.execute(query) + return result.scalar_one_or_none() + + async def list_by_workspace(self, workspace_id: uuid.UUID) -> List[Agent]: + query = ( + select(Agent) + .options( + selectinload(Agent.current_draft_version), + selectinload(Agent.active_release), + ) + .where(Agent.workspace_id == workspace_id) + .order_by(Agent.created_at.desc()) + ) + result = await self.db.execute(query) + return list(result.scalars().all()) + + +class AgentVersionRepository(BaseRepository[AgentVersion]): + def __init__(self, db: AsyncSession): + super().__init__(AgentVersion, db) + + async def list_by_agent(self, agent_id: uuid.UUID) -> List[AgentVersion]: + query = ( + select(AgentVersion).where(AgentVersion.agent_id == agent_id).order_by(AgentVersion.version_number.desc()) + ) + result = await self.db.execute(query) + return list(result.scalars().all()) + + async def get_max_version_number(self, agent_id: uuid.UUID) -> int: + query = select(func.coalesce(func.max(AgentVersion.version_number), 0)).where(AgentVersion.agent_id == agent_id) + result = await self.db.execute(query) + return result.scalar() or 0 diff --git a/backend/app/repositories/agent_release.py b/backend/app/repositories/agent_release.py new file mode 100644 index 000000000..175eeafcf --- /dev/null +++ b/backend/app/repositories/agent_release.py @@ -0,0 +1,39 @@ +""" +Repository for AgentRelease. 
+""" + +from __future__ import annotations + +import uuid +from typing import List + +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.agent import AgentRelease, AgentVersion + +from .base import BaseRepository + + +class AgentReleaseRepository(BaseRepository[AgentRelease]): + def __init__(self, db: AsyncSession): + super().__init__(AgentRelease, db) + + async def list_by_agent(self, agent_id: uuid.UUID) -> List[AgentRelease]: + """List all releases for an agent (joining through AgentVersion).""" + query = ( + select(AgentRelease) + .join(AgentVersion, AgentRelease.agent_version_id == AgentVersion.id) + .where(AgentVersion.agent_id == agent_id) + .order_by(AgentRelease.release_number.desc()) + ) + result = await self.db.execute(query) + return list(result.scalars().all()) + + async def get_max_release_number(self, agent_version_id: uuid.UUID) -> int: + """Get the max release number for a given agent version (for auto-increment).""" + query = select(func.coalesce(func.max(AgentRelease.release_number), 0)).where( + AgentRelease.agent_version_id == agent_version_id + ) + result = await self.db.execute(query) + return result.scalar() or 0 diff --git a/backend/app/repositories/agent_run.py b/backend/app/repositories/agent_run.py index afe3e8a07..b1562d885 100644 --- a/backend/app/repositories/agent_run.py +++ b/backend/app/repositories/agent_run.py @@ -1,18 +1,17 @@ """ -AgentRun repository helpers. +Repository for AgentRun. """ from __future__ import annotations import uuid -from datetime import datetime -from typing import Optional, Sequence +from typing import List, Optional -from sqlalchemy import and_, desc, or_, select -from sqlalchemy import delete as sa_delete +from sqlalchemy import or_, select from sqlalchemy.ext.asyncio import AsyncSession -from app.models.agent_run import AgentRun, AgentRunEvent, AgentRunSnapshot, AgentRunStatus +from app.models.agent import AgentRelease, AgentVersion +from app.models.agent_run import AgentRun from .base import BaseRepository @@ -21,137 +20,69 @@ class AgentRunRepository(BaseRepository[AgentRun]): def __init__(self, db: AsyncSession): super().__init__(AgentRun, db) - async def get_by_id_and_user(self, run_id: uuid.UUID, user_id: str) -> Optional[AgentRun]: - result = await self.db.execute( - select(AgentRun).where( - AgentRun.id == run_id, - AgentRun.user_id == user_id, - ) - ) - return result.scalar_one_or_none() - - async def get_snapshot(self, run_id: uuid.UUID) -> Optional[AgentRunSnapshot]: - result = await self.db.execute(select(AgentRunSnapshot).where(AgentRunSnapshot.run_id == run_id)) - return result.scalar_one_or_none() - - async def list_events_after( - self, run_id: uuid.UUID, after_seq: int = 0, limit: int = 500 - ) -> Sequence[AgentRunEvent]: - result = await self.db.execute( - select(AgentRunEvent) - .where( - AgentRunEvent.run_id == run_id, - AgentRunEvent.seq > after_seq, - ) - .order_by(AgentRunEvent.seq.asc()) + async def list_by_workspace(self, workspace_id: uuid.UUID, limit: int = 50) -> List[AgentRun]: + """List all runs for a workspace.""" + query = ( + select(AgentRun) + .where(AgentRun.workspace_id == workspace_id) + .order_by(AgentRun.created_at.desc()) .limit(limit) ) - return result.scalars().all() + result = await self.db.execute(query) + return list(result.scalars().all()) - async def get_run_for_update(self, run_id: uuid.UUID, user_id: Optional[str] = None) -> Optional[AgentRun]: - query = select(AgentRun).where(AgentRun.id == run_id) - if user_id is not None: - 
query = query.where(AgentRun.user_id == user_id) - result = await self.db.execute(query.with_for_update()) - return result.scalar_one_or_none() - - async def find_latest_active_skill_creator_run( + async def list_by_release( self, - *, - user_id: str, - graph_id: uuid.UUID, - thread_id: Optional[str] = None, - ) -> Optional[AgentRun]: - # Wrapper kept for backward compatibility with external callers. - return await self.find_latest_active_run( - user_id=user_id, - agent_name="skill_creator", - graph_id=graph_id, - thread_id=thread_id, - ) - - async def find_latest_active_run( + release_id: uuid.UUID, + workspace_id: uuid.UUID | None = None, + ) -> List[AgentRun]: + """List all runs for a specific release.""" + query = select(AgentRun).where(AgentRun.release_id == release_id).order_by(AgentRun.created_at.desc()) + if workspace_id: + query = query.where(AgentRun.workspace_id == workspace_id) + result = await self.db.execute(query) + return list(result.scalars().all()) + + async def list_by_task( self, - *, - user_id: str, - agent_name: str, - graph_id: Optional[uuid.UUID] = None, - thread_id: Optional[str] = None, - ) -> Optional[AgentRun]: - active_statuses = (AgentRunStatus.QUEUED, AgentRunStatus.RUNNING, AgentRunStatus.INTERRUPT_WAIT) - query = select(AgentRun).where( - AgentRun.user_id == user_id, - AgentRun.agent_name == agent_name, - AgentRun.status.in_(active_statuses), - ) - if graph_id is not None: - query = query.where(AgentRun.graph_id == graph_id) - if thread_id: - query = query.where(AgentRun.thread_id == thread_id) - result = await self.db.execute(query.order_by(desc(AgentRun.updated_at)).limit(1)) - return result.scalar_one_or_none() - - async def list_recent_runs_for_user( + task_id: uuid.UUID, + workspace_id: uuid.UUID | None = None, + ) -> List[AgentRun]: + """List all runs for a specific task.""" + query = select(AgentRun).where(AgentRun.task_id == task_id).order_by(AgentRun.created_at.desc()) + if workspace_id: + query = query.where(AgentRun.workspace_id == workspace_id) + result = await self.db.execute(query) + return list(result.scalars().all()) + + async def find_by_agent_and_trigger( self, - *, - user_id: str, - run_type: Optional[str] = None, - agent_name: Optional[str] = None, + agent_id: uuid.UUID, + workspace_id: uuid.UUID, + trigger_medium: Optional[str] = None, + run_purpose: Optional[str] = None, status: Optional[str] = None, - search: Optional[str] = None, - graph_id: Optional[uuid.UUID] = None, - limit: int = 50, - ) -> Sequence[AgentRun]: - query = select(AgentRun).where(AgentRun.user_id == user_id) - if run_type: - query = query.where(AgentRun.run_type == run_type) - if agent_name: - query = query.where(AgentRun.agent_name == agent_name) - if status: - query = query.where(AgentRun.status == status) - if search: - query = query.where(AgentRun.title.ilike(f"%{search}%")) - if graph_id: - query = query.where(AgentRun.graph_id == graph_id) - result = await self.db.execute(query.order_by(desc(AgentRun.updated_at)).limit(limit)) - return result.scalars().all() - - async def delete_runs_for_graph( - self, - *, - user_id: str, - agent_name: str, - graph_id: uuid.UUID, - ) -> int: - """Hard-delete all runs (and cascaded events/snapshots) for a graph.""" - result = await self.db.execute( - sa_delete(AgentRun).where( - AgentRun.user_id == user_id, - AgentRun.agent_name == agent_name, - AgentRun.graph_id == graph_id, - ) - ) - await self.db.commit() - return getattr(result, "rowcount", 0) or 0 - - async def list_recoverable_stale_runs( - self, - *, - stale_before: 
datetime, - ) -> Sequence[AgentRun]: - recoverable_statuses = (AgentRunStatus.QUEUED, AgentRunStatus.RUNNING) - result = await self.db.execute( + ) -> List[AgentRun]: + """Find runs for a specific agent, optionally filtered by trigger_medium, run_purpose and status.""" + query = ( select(AgentRun) - .where( - AgentRun.status.in_(recoverable_statuses), + .outerjoin(AgentRelease, AgentRun.release_id == AgentRelease.id) + .outerjoin( + AgentVersion, or_( - and_( - AgentRun.last_heartbeat_at.is_(None), - AgentRun.updated_at < stale_before, - ), - AgentRun.last_heartbeat_at < stale_before, + AgentRelease.agent_version_id == AgentVersion.id, + AgentRun.agent_version_id == AgentVersion.id, ), ) - .order_by(desc(AgentRun.updated_at)) + .where(AgentVersion.agent_id == agent_id) + .where(AgentRun.workspace_id == workspace_id) ) - return result.scalars().all() + if trigger_medium: + query = query.where(AgentRun.trigger_medium == trigger_medium) + if run_purpose: + query = query.where(AgentRun.run_purpose == run_purpose) + if status: + query = query.where(AgentRun.status == status) + query = query.order_by(AgentRun.created_at.desc()).limit(10) + result = await self.db.execute(query) + return list(result.scalars().all()) diff --git a/backend/app/repositories/execution.py b/backend/app/repositories/execution.py new file mode 100644 index 000000000..06c66b648 --- /dev/null +++ b/backend/app/repositories/execution.py @@ -0,0 +1,73 @@ +""" +Repository for Execution and ExecutionEvent. +""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import List + +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.execution import Execution, ExecutionEvent + +from .base import BaseRepository + + +class ExecutionRepository(BaseRepository[Execution]): + def __init__(self, db: AsyncSession): + super().__init__(Execution, db) + + async def list_by_run(self, run_id: uuid.UUID) -> List[Execution]: + """List all executions for a run, ordered by attempt_index.""" + query = select(Execution).where(Execution.run_id == run_id).order_by(Execution.attempt_index.asc()) + result = await self.db.execute(query) + return list(result.scalars().all()) + + async def get_max_attempt(self, run_id: uuid.UUID) -> int: + """Get the max attempt_index for a given run.""" + query = select(func.coalesce(func.max(Execution.attempt_index), 0)).where(Execution.run_id == run_id) + result = await self.db.execute(query) + return result.scalar() or 0 + + async def list_recoverable_stale( + self, + statuses: tuple[str, ...], + stale_before: datetime, + ) -> List[Execution]: + """Query executions eligible for stale-reaping. + + Args: + statuses: execution status values to match (e.g. ``("pending", "dispatched")``). + stale_before: cutoff datetime; executions whose ``started_at`` + (or ``created_at`` if never started) is older than this are stale. + + Returns: + List of stale Execution records, oldest first. 
+ """ + cutoff = func.coalesce(Execution.started_at, Execution.created_at) + query = ( + select(Execution) + .where(Execution.status.in_(statuses), cutoff < stale_before) + .order_by(Execution.created_at.asc()) + ) + result = await self.db.execute(query) + return list(result.scalars().all()) + + +class ExecutionEventRepository(BaseRepository[ExecutionEvent]): + def __init__(self, db: AsyncSession): + super().__init__(ExecutionEvent, db) + + async def list_by_execution(self, execution_id: uuid.UUID, limit: int = 500) -> List[ExecutionEvent]: + """List events for an execution, ordered by sequence_no.""" + query = ( + select(ExecutionEvent) + .where(ExecutionEvent.execution_id == execution_id) + .order_by(ExecutionEvent.sequence_no.asc()) + .limit(limit) + ) + result = await self.db.execute(query) + return list(result.scalars().all()) diff --git a/backend/app/repositories/graph.py b/backend/app/repositories/graph.py deleted file mode 100644 index 8c60c5e83..000000000 --- a/backend/app/repositories/graph.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -Graph repositories -""" - -from __future__ import annotations - -import uuid -from typing import List, Optional - -from sqlalchemy import and_, delete, or_, select -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import selectinload - -from app.models.graph import AgentGraph, GraphEdge, GraphNode - -from .base import BaseRepository - - -def _graph_not_deleted(query): - """Filter out soft-deleted graphs.""" - return query.where(AgentGraph.deleted_at.is_(None)) - - -class GraphRepository(BaseRepository[AgentGraph]): - """Agent Graph Repository (soft-delete aware)""" - - def __init__(self, db: AsyncSession): - super().__init__(AgentGraph, db) - - async def get(self, id: uuid.UUID, relations: Optional[List[str]] = None): - """Get graph by ID; returns None if deleted.""" - query = select(AgentGraph).where(AgentGraph.id == id) - query = _graph_not_deleted(query) - if relations: - for rel in relations: - if hasattr(AgentGraph, rel): - query = query.options(selectinload(getattr(AgentGraph, rel))) - result = await self.db.execute(query) - return result.scalar_one_or_none() - - async def list_by_user_with_filters( - self, - user_id: str, - parent_id: Optional[uuid.UUID] = None, - workspace_id: Optional[uuid.UUID] = None, - ) -> List[AgentGraph]: - """List graphs by user ID (exclude soft-deleted).""" - query = select(AgentGraph).where(AgentGraph.user_id == user_id) - query = _graph_not_deleted(query) - if parent_id is not None: - query = query.where(AgentGraph.parent_id == parent_id) - if workspace_id is not None: - query = query.where(AgentGraph.workspace_id == workspace_id) - query = query.order_by(AgentGraph.created_at.desc(), AgentGraph.id.desc()) - result = await self.db.execute(query) - return list(result.scalars().all()) - - -class GraphNodeRepository(BaseRepository[GraphNode]): - """Graph Node Repository""" - - def __init__(self, db: AsyncSession): - super().__init__(GraphNode, db) - - async def list_by_graph(self, graph_id: uuid.UUID) -> List[GraphNode]: - """List all nodes for a graph.""" - query = select(GraphNode).where(GraphNode.graph_id == graph_id) - result = await self.db.execute(query) - return list(result.scalars().all()) - - async def delete_by_graph(self, graph_id: uuid.UUID) -> int: - """Delete all nodes of a graph.""" - stmt = delete(GraphNode).where(GraphNode.graph_id == graph_id) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 - - async def delete_by_ids(self, graph_id: uuid.UUID, 
node_ids: List[uuid.UUID]) -> int: - """Batch-delete nodes by IDs.""" - if not node_ids: - return 0 - stmt = delete(GraphNode).where( - and_( - GraphNode.graph_id == graph_id, - GraphNode.id.in_(node_ids), - ) - ) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 - - -class GraphEdgeRepository(BaseRepository[GraphEdge]): - """Graph Edge Repository""" - - def __init__(self, db: AsyncSession): - super().__init__(GraphEdge, db) - - async def list_by_graph(self, graph_id: uuid.UUID) -> List[GraphEdge]: - """List all edges for a graph.""" - query = select(GraphEdge).where(GraphEdge.graph_id == graph_id) - result = await self.db.execute(query) - return list(result.scalars().all()) - - async def delete_by_graph(self, graph_id: uuid.UUID) -> int: - """Delete all edges of a graph.""" - stmt = delete(GraphEdge).where(GraphEdge.graph_id == graph_id) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 - - async def delete_by_node_ids(self, graph_id: uuid.UUID, node_ids: List[uuid.UUID]) -> int: - """Delete all edges connected to the specified nodes.""" - if not node_ids: - return 0 - stmt = delete(GraphEdge).where( - and_( - GraphEdge.graph_id == graph_id, - or_( - GraphEdge.source_node_id.in_(node_ids), - GraphEdge.target_node_id.in_(node_ids), - ), - ) - ) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 diff --git a/backend/app/repositories/graph_deployment_version.py b/backend/app/repositories/graph_deployment_version.py deleted file mode 100644 index 51e1a21be..000000000 --- a/backend/app/repositories/graph_deployment_version.py +++ /dev/null @@ -1,198 +0,0 @@ -""" -Graph deployment version Repository -""" - -from __future__ import annotations - -import uuid -from datetime import datetime, timezone -from typing import Any, Dict, List, Optional - -from sqlalchemy import and_, delete, func, select, update -from sqlalchemy.ext.asyncio import AsyncSession - -from app.models.graph_deployment_version import GraphDeploymentVersion - -from .base import BaseRepository - - -class GraphDeploymentVersionRepository(BaseRepository[GraphDeploymentVersion]): - """Graph deployment version Repository.""" - - def __init__(self, db: AsyncSession): - super().__init__(GraphDeploymentVersion, db) - - async def get_by_graph_and_version(self, graph_id: uuid.UUID, version: int) -> Optional[GraphDeploymentVersion]: - """Get a specific version of a graph.""" - query = select(GraphDeploymentVersion).where( - and_( - GraphDeploymentVersion.graph_id == graph_id, - GraphDeploymentVersion.version == version, - ) - ) - result = await self.db.execute(query) - return result.scalar_one_or_none() - - async def get_active_version(self, graph_id: uuid.UUID) -> Optional[GraphDeploymentVersion]: - """Get the active version of a graph.""" - query = ( - select(GraphDeploymentVersion) - .where( - and_( - GraphDeploymentVersion.graph_id == graph_id, - GraphDeploymentVersion.is_active, - ) - ) - .order_by(GraphDeploymentVersion.created_at.desc()) - ) - result = await self.db.execute(query) - return result.scalar_one_or_none() - - async def list_by_graph(self, graph_id: uuid.UUID, include_inactive: bool = True) -> List[GraphDeploymentVersion]: - """List all versions of a graph.""" - query = select(GraphDeploymentVersion).where(GraphDeploymentVersion.graph_id == graph_id) - - if not include_inactive: - query = query.where(GraphDeploymentVersion.is_active) - - query = query.order_by(GraphDeploymentVersion.version.desc()) - - result = await 
self.db.execute(query) - return list(result.scalars().all()) - - async def list_by_graph_paginated( - self, - graph_id: uuid.UUID, - page: int = 1, - page_size: int = 10, - include_inactive: bool = True, - ) -> tuple[List[GraphDeploymentVersion], int]: - """List versions of a graph (paginated). - - Returns: - tuple: (version list, total count) - """ - base_query = select(GraphDeploymentVersion).where(GraphDeploymentVersion.graph_id == graph_id) - - if not include_inactive: - base_query = base_query.where(GraphDeploymentVersion.is_active) - - # get total count - count_query = select(func.count()).where(GraphDeploymentVersion.graph_id == graph_id) - if not include_inactive: - count_query = count_query.where(GraphDeploymentVersion.is_active) - count_result = await self.db.execute(count_query) - total = count_result.scalar() or 0 - - # paginated query - offset = (page - 1) * page_size - query = base_query.order_by(GraphDeploymentVersion.version.desc()).offset(offset).limit(page_size) - - result = await self.db.execute(query) - versions = list(result.scalars().all()) - - return versions, total - - async def get_next_version_number(self, graph_id: uuid.UUID) -> int: - """Get the next version number.""" - query = select(func.coalesce(func.max(GraphDeploymentVersion.version), 0)).where( - GraphDeploymentVersion.graph_id == graph_id - ) - result = await self.db.execute(query) - max_version = result.scalar() or 0 - return max_version + 1 - - async def deactivate_all_versions(self, graph_id: uuid.UUID) -> int: - """Deactivate all versions of a graph.""" - stmt = update(GraphDeploymentVersion).where(GraphDeploymentVersion.graph_id == graph_id).values(is_active=False) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 - - async def create_version( - self, - graph_id: uuid.UUID, - state: Dict[str, Any], - created_by: Optional[str] = None, - name: Optional[str] = None, - ) -> GraphDeploymentVersion: - """Create a new version.""" - next_version = await self.get_next_version_number(graph_id) - await self.deactivate_all_versions(graph_id) - - version_data = { - "graph_id": graph_id, - "version": next_version, - "state": state, - "is_active": True, - "created_at": datetime.now(timezone.utc), - } - if created_by is not None: - version_data["created_by"] = created_by - if name is not None: - version_data["name"] = name - - instance = GraphDeploymentVersion(**version_data) - self.db.add(instance) - await self.db.flush() - await self.db.refresh(instance) - - return instance - - async def activate_version(self, graph_id: uuid.UUID, version: int) -> Optional[GraphDeploymentVersion]: - """Activate a specific version.""" - await self.deactivate_all_versions(graph_id) - - stmt = ( - update(GraphDeploymentVersion) - .where( - and_( - GraphDeploymentVersion.graph_id == graph_id, - GraphDeploymentVersion.version == version, - ) - ) - .values(is_active=True) - ) - await self.db.execute(stmt) - await self.db.flush() - - return await self.get_by_graph_and_version(graph_id, version) - - async def rename_version(self, graph_id: uuid.UUID, version: int, name: str) -> Optional[GraphDeploymentVersion]: - """Rename a version.""" - stmt = ( - update(GraphDeploymentVersion) - .where( - and_( - GraphDeploymentVersion.graph_id == graph_id, - GraphDeploymentVersion.version == version, - ) - ) - .values(name=name) - ) - await self.db.execute(stmt) - await self.db.flush() - - return await self.get_by_graph_and_version(graph_id, version) - - async def count_by_graph(self, graph_id: uuid.UUID) -> int: - 
"""Count versions for a graph.""" - query = select(func.count()).where(GraphDeploymentVersion.graph_id == graph_id) - result = await self.db.execute(query) - return result.scalar() or 0 - - async def delete_by_graph(self, graph_id: uuid.UUID) -> int: - """Delete all versions of a graph.""" - stmt = delete(GraphDeploymentVersion).where(GraphDeploymentVersion.graph_id == graph_id) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 - - async def delete_version(self, graph_id: uuid.UUID, version: int) -> int: - """Delete a specific version.""" - stmt = delete(GraphDeploymentVersion).where( - and_( - GraphDeploymentVersion.graph_id == graph_id, - GraphDeploymentVersion.version == version, - ) - ) - result = await self.db.execute(stmt) - return getattr(result, "rowcount", 0) or 0 diff --git a/backend/app/repositories/graph_execution.py b/backend/app/repositories/graph_execution.py deleted file mode 100644 index c0c96e9e5..000000000 --- a/backend/app/repositories/graph_execution.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -GraphExecution Repository -""" - -from __future__ import annotations - -import uuid -from typing import Optional - -from sqlalchemy import select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.models.graph_execution import GraphExecution - -from .base import BaseRepository - - -class GraphExecutionRepository(BaseRepository[GraphExecution]): - def __init__(self, db: AsyncSession): - super().__init__(GraphExecution, db) - - async def get_by_id_and_user(self, execution_id: uuid.UUID, user_id: str) -> Optional[GraphExecution]: - """Get an execution record for a specific user.""" - result = await self.db.execute( - select(GraphExecution).where( - GraphExecution.id == execution_id, - GraphExecution.user_id == user_id, - ) - ) - return result.scalar_one_or_none() diff --git a/backend/app/repositories/model_instance.py b/backend/app/repositories/model_instance.py index 60d845e60..51c4d99a9 100644 --- a/backend/app/repositories/model_instance.py +++ b/backend/app/repositories/model_instance.py @@ -32,7 +32,6 @@ async def get_best_instance( self, model_name: str, provider_id: uuid.UUID, - provider_name: str = "", # kept for call-site compatibility; unused user_id: Optional[str] = None, ) -> ModelInstance | None: """Get an instance by provider and model name. Prefer global instances; fall back to any valid one.""" @@ -63,7 +62,6 @@ async def list_all(self) -> list[ModelInstance]: async def list_by_provider( self, provider_id: uuid.UUID, - provider_name: Optional[str] = None, # kept for call-site compatibility; unused ) -> list[ModelInstance]: """Filter model instances by provider.""" query = ( diff --git a/backend/app/repositories/task.py b/backend/app/repositories/task.py new file mode 100644 index 000000000..d7ce60d39 --- /dev/null +++ b/backend/app/repositories/task.py @@ -0,0 +1,73 @@ +""" +Task repository helpers. 
+""" + +from __future__ import annotations + +import uuid +from typing import Optional, Sequence + +from sqlalchemy import desc, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.task import Task, TaskStatus + +from .base import BaseRepository + + +class TaskRepository(BaseRepository[Task]): + def __init__(self, db: AsyncSession): + super().__init__(Task, db) + + async def get_by_id_and_workspace(self, task_id: uuid.UUID, workspace_id: uuid.UUID) -> Optional[Task]: + result = await self.db.execute( + select(Task).where( + Task.id == task_id, + Task.workspace_id == workspace_id, + ) + ) + return result.scalar_one_or_none() + + async def get_for_update(self, task_id: uuid.UUID, workspace_id: Optional[uuid.UUID] = None) -> Optional[Task]: + query = select(Task).where(Task.id == task_id) + if workspace_id is not None: + query = query.where(Task.workspace_id == workspace_id) + result = await self.db.execute(query.with_for_update()) + return result.scalar_one_or_none() + + async def list_by_workspace( + self, + *, + workspace_id: uuid.UUID, + status: Optional[str] = None, + creator_id: Optional[str] = None, + agent_id: Optional[uuid.UUID] = None, + parent_task_id: Optional[uuid.UUID] = None, + limit: int = 50, + ) -> Sequence[Task]: + query = select(Task).where(Task.workspace_id == workspace_id) + if status: + query = query.where(Task.status == status) + if creator_id: + query = query.where(Task.creator_id == creator_id) + if agent_id: + query = query.where(Task.agent_id == agent_id) + if parent_task_id: + query = query.where(Task.parent_task_id == parent_task_id) + result = await self.db.execute(query.order_by(Task.position.asc(), desc(Task.created_at)).limit(limit)) + return result.scalars().all() + + async def list_dispatchable(self, *, workspace_id: Optional[uuid.UUID] = None, limit: int = 10) -> Sequence[Task]: + """Find BACKLOG tasks with an agent assigned, ready for dispatch. + + When workspace_id is None, searches across all workspaces. + """ + query = select(Task).where( + Task.status == TaskStatus.BACKLOG, + Task.agent_id.isnot(None), + Task.latest_run_id.is_(None), + ) + if workspace_id is not None: + query = query.where(Task.workspace_id == workspace_id) + result = await self.db.execute(query.order_by(Task.position.asc(), Task.created_at.asc()).limit(limit)) + return result.scalars().all() diff --git a/backend/app/repositories/task_activity.py b/backend/app/repositories/task_activity.py new file mode 100644 index 000000000..4faaaefc2 --- /dev/null +++ b/backend/app/repositories/task_activity.py @@ -0,0 +1,67 @@ +""" +TaskActivity repository helpers. 
+""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional, Sequence + +from sqlalchemy import asc, desc, literal, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.task_activity import ActivityAuthorType, TaskActivity + +from .base import BaseRepository + + +class TaskActivityRepository(BaseRepository[TaskActivity]): + def __init__(self, db: AsyncSession): + super().__init__(TaskActivity, db) + + async def list_by_task( + self, + task_id: uuid.UUID, + *, + cursor: Optional[datetime] = None, + limit: int = 50, + order_asc: bool = True, + ) -> Sequence[TaskActivity]: + query = select(TaskActivity).where(TaskActivity.task_id == task_id) + if cursor is not None: + if order_asc: + query = query.where(TaskActivity.created_at > cursor) + else: + query = query.where(TaskActivity.created_at < cursor) + order = asc(TaskActivity.created_at) if order_asc else desc(TaskActivity.created_at) + result = await self.db.execute(query.order_by(order).limit(limit)) + return result.scalars().all() + + async def get_by_id_and_task(self, activity_id: uuid.UUID, task_id: uuid.UUID) -> Optional[TaskActivity]: + result = await self.db.execute( + select(TaskActivity).where( + TaskActivity.id == activity_id, + TaskActivity.task_id == task_id, + ) + ) + return result.scalar_one_or_none() + + async def has_agent_posted_since( + self, + task_id: uuid.UUID, + agent_id: str, + since: datetime, + ) -> bool: + stmt = ( + select(literal(True)) + .where( + TaskActivity.task_id == task_id, + TaskActivity.author_type == ActivityAuthorType.AGENT, + TaskActivity.author_id == agent_id, + TaskActivity.created_at >= since, + ) + .limit(1) + ) + result = await self.db.execute(stmt) + return result.scalar() is not None diff --git a/backend/app/repositories/thread.py b/backend/app/repositories/thread.py new file mode 100644 index 000000000..2c59dfc57 --- /dev/null +++ b/backend/app/repositories/thread.py @@ -0,0 +1,30 @@ +""" +Repositories for Thread. 
+""" + +from __future__ import annotations + +import uuid +from typing import List + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.thread import Thread + +from .base import BaseRepository + + +class ThreadRepository(BaseRepository[Thread]): + def __init__(self, db: AsyncSession): + super().__init__(Thread, db) + + async def list_by_agent(self, agent_id: uuid.UUID) -> List[Thread]: + query = select(Thread).where(Thread.agent_id == agent_id).order_by(Thread.updated_at.desc()) + result = await self.db.execute(query) + return list(result.scalars().all()) + + async def list_by_workspace(self, workspace_id: uuid.UUID) -> List[Thread]: + query = select(Thread).where(Thread.workspace_id == workspace_id).order_by(Thread.updated_at.desc()) + result = await self.db.execute(query) + return list(result.scalars().all()) diff --git a/backend/app/repositories/workspace_file.py b/backend/app/repositories/workspace_file.py deleted file mode 100644 index 7c8b75b1a..000000000 --- a/backend/app/repositories/workspace_file.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Workspace file storage Repository -""" - -import uuid -from typing import List, Optional - -from sqlalchemy import func, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.models.workspace_files import WorkspaceStoredFile - -from .base import BaseRepository - - -class WorkspaceStoredFileRepository(BaseRepository[WorkspaceStoredFile]): - """Workspace file metadata access.""" - - CONTEXT_WORKSPACE = "workspace" - - def __init__(self, db: AsyncSession): - super().__init__(WorkspaceStoredFile, db) - - async def list_workspace_files(self, workspace_id: uuid.UUID) -> List[WorkspaceStoredFile]: - """List workspace files ordered by upload time.""" - query = ( - select(WorkspaceStoredFile) - .where( - WorkspaceStoredFile.workspace_id == workspace_id, - WorkspaceStoredFile.context == self.CONTEXT_WORKSPACE, - ) - .order_by(WorkspaceStoredFile.uploaded_at.asc()) - ) - result = await self.db.execute(query) - return list(result.scalars().all()) - - async def get_by_id_and_workspace( - self, file_id: uuid.UUID, workspace_id: uuid.UUID - ) -> Optional[WorkspaceStoredFile]: - """Get a record by file ID and workspace.""" - query = select(WorkspaceStoredFile).where( - WorkspaceStoredFile.id == file_id, - WorkspaceStoredFile.workspace_id == workspace_id, - WorkspaceStoredFile.context == self.CONTEXT_WORKSPACE, - ) - result = await self.db.execute(query) - return result.scalar_one_or_none() - - async def find_by_name(self, workspace_id: uuid.UUID, original_name: str) -> Optional[WorkspaceStoredFile]: - """Detect a file with the same name.""" - query = select(WorkspaceStoredFile).where( - WorkspaceStoredFile.workspace_id == workspace_id, - WorkspaceStoredFile.original_name == original_name, - WorkspaceStoredFile.context == self.CONTEXT_WORKSPACE, - ) - result = await self.db.execute(query) - return result.scalar_one_or_none() - - async def sum_user_usage(self, user_id: uuid.UUID) -> int: - """Calculate total storage used by all files of a user (bytes).""" - query = select(func.coalesce(func.sum(WorkspaceStoredFile.size), 0)).where( - WorkspaceStoredFile.user_id == user_id - ) - result = await self.db.execute(query) - total = result.scalar() or 0 - return int(total) - - async def sum_workspace_usage(self, workspace_id: uuid.UUID) -> int: - """Calculate total storage used by files in a workspace (bytes).""" - query = select(func.coalesce(func.sum(WorkspaceStoredFile.size), 0)).where( - 
WorkspaceStoredFile.workspace_id == workspace_id, - WorkspaceStoredFile.context == self.CONTEXT_WORKSPACE, - ) - result = await self.db.execute(query) - total = result.scalar() or 0 - return int(total) diff --git a/backend/app/repositories/workspace_folder.py b/backend/app/repositories/workspace_folder.py deleted file mode 100644 index 0c201610a..000000000 --- a/backend/app/repositories/workspace_folder.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -Workspace folder Repository -""" - -import uuid -from typing import List, Optional, Tuple - -from sqlalchemy import select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.models.workspace import WorkspaceFolder - -from .base import BaseRepository - - -class WorkflowFolderRepository(BaseRepository[WorkspaceFolder]): - """Folder data access.""" - - def __init__(self, db: AsyncSession): - super().__init__(WorkspaceFolder, db) - - async def list_by_workspace(self, workspace_id: uuid.UUID) -> List[WorkspaceFolder]: - query = ( - select(WorkspaceFolder) - .where( - WorkspaceFolder.workspace_id == workspace_id, - WorkspaceFolder.deleted_at.is_(None), - ) - .order_by(WorkspaceFolder.sort_order.asc(), WorkspaceFolder.created_at.asc()) - ) - result = await self.db.execute(query) - return list(result.scalars().all()) - - async def max_sort_order(self, workspace_id: uuid.UUID, parent_id: Optional[uuid.UUID]) -> int: - """ - Return the maximum sort_order under the given workspace + parent. - Return -1 if no records exist (so the caller can use next_sort = max + 1, starting at 0). - """ - conditions = [ - WorkspaceFolder.workspace_id == workspace_id, - WorkspaceFolder.parent_id.is_(None) if parent_id is None else WorkspaceFolder.parent_id == parent_id, - WorkspaceFolder.deleted_at.is_(None), - ] - - query = ( - select(WorkspaceFolder.sort_order).where(*conditions).order_by(WorkspaceFolder.sort_order.desc()).limit(1) - ) - result = await self.db.execute(query) - current = result.scalar_one_or_none() - return current if current is not None else -1 - - async def list_relations_by_workspace(self, workspace_id: uuid.UUID) -> List[Tuple[uuid.UUID, Optional[uuid.UUID]]]: - """Get all (id, parent_id) pairs in a workspace for building a tree/subtree.""" - query = select(WorkspaceFolder.id, WorkspaceFolder.parent_id).where( - WorkspaceFolder.workspace_id == workspace_id, - WorkspaceFolder.deleted_at.is_(None), - ) - - result = await self.db.execute(query) - return [(row[0], row[1]) for row in result.fetchall()] - - async def ensure_same_workspace(self, folder_id: uuid.UUID, workspace_id: uuid.UUID) -> WorkspaceFolder: - folder = await self.get(folder_id) - if not folder or folder.workspace_id != workspace_id: - from app.common.exceptions import NotFoundException - - raise NotFoundException("Folder not found in workspace") - return folder diff --git a/backend/app/schemas/__init__.py b/backend/app/schemas/__init__.py index d2fe8edae..3922843ce 100644 --- a/backend/app/schemas/__init__.py +++ b/backend/app/schemas/__init__.py @@ -5,28 +5,6 @@ from .base import BaseResponse from .chat import ChatRequest, ChatResponse from .common import PaginatedResponse -from .conversation import ( - CheckpointResponse, - ConversationCreate, - ConversationDetailResponse, - ConversationExportResponse, - ConversationImportRequest, - ConversationMessageResponse, - ConversationResponse, - ConversationUpdate, - SearchRequest, - SearchResponse, - UserStatsResponse, -) -from .graph_deployment_version import ( - GraphDeploymentVersionListResponse, - GraphDeploymentVersionResponse, - 
GraphDeploymentVersionResponseCamel, - GraphDeployRequest, - GraphDeployResponse, - GraphRenameVersionRequest, - GraphRevertResponse, -) from .mcp import ( ConnectionTestResult, McpServerCreate, @@ -41,19 +19,8 @@ "BaseResponse", "PaginatedResponse", "UserResponse", - "ConversationCreate", - "ConversationUpdate", - "ConversationResponse", - "ConversationDetailResponse", - "ConversationExportResponse", - "ConversationImportRequest", - "CheckpointResponse", "ChatRequest", "ChatResponse", - "SearchRequest", - "SearchResponse", - "UserStatsResponse", - "ConversationMessageResponse", # MCP Schemas "McpServerCreate", "McpServerUpdate", @@ -61,12 +28,4 @@ "ConnectionTestResult", "ToolInfo", "ToolResponse", - # Graph Deployment Version Schemas - "GraphDeploymentVersionResponse", - "GraphDeploymentVersionResponseCamel", - "GraphDeploymentVersionListResponse", - "GraphDeployRequest", - "GraphDeployResponse", - "GraphRevertResponse", - "GraphRenameVersionRequest", ] diff --git a/backend/app/schemas/agent.py b/backend/app/schemas/agent.py new file mode 100644 index 000000000..546c8de82 --- /dev/null +++ b/backend/app/schemas/agent.py @@ -0,0 +1,86 @@ +""" +Pydantic schemas for Agent API. +""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Any, Dict, Literal, Optional + +from pydantic import BaseModel, Field + +from app.core.contracts.agent import EngineKind, RuntimeKind + +# --------------------------------------------------------------------------- +# Literals +# --------------------------------------------------------------------------- + +AgentStatusLiteral = Literal["draft", "active", "archived"] + +# --------------------------------------------------------------------------- +# Request schemas +# --------------------------------------------------------------------------- + + +class CreateAgentRequest(BaseModel): + name: str = Field(..., max_length=255) + description: Optional[str] = None + avatar: Optional[str] = None + engine_kind: EngineKind = "langgraph_visual" + definition_payload: Optional[Dict[str, Any]] = None + capability_manifest: Optional[Dict[str, Any]] = None + custom_env: Optional[Dict[str, str]] = None + + +class UpdateAgentRequest(BaseModel): + name: Optional[str] = Field(None, max_length=255) + description: Optional[str] = None + avatar: Optional[str] = None + status: Optional[AgentStatusLiteral] = None + custom_env: Optional[Dict[str, str]] = None + + +# --------------------------------------------------------------------------- +# Response schemas +# --------------------------------------------------------------------------- + + +class AgentSummary(BaseModel): + id: uuid.UUID + workspace_id: uuid.UUID + name: str + slug: str + description: Optional[str] = None + avatar: Optional[str] = None + status: str + has_custom_env: bool = False + current_draft_version_id: Optional[uuid.UUID] = None + active_release_id: Optional[uuid.UUID] = None + engine_kind: Optional[EngineKind] = None + runtime_kind: Optional[RuntimeKind] = None + created_by: str + created_at: datetime + updated_at: datetime + + model_config = {"from_attributes": True} + + +class AgentResponse(BaseModel): + id: uuid.UUID + workspace_id: uuid.UUID + name: str + slug: str + description: Optional[str] = None + avatar: Optional[str] = None + status: str + has_custom_env: bool = False + current_draft_version_id: Optional[uuid.UUID] = None + active_release_id: Optional[uuid.UUID] = None + engine_kind: Optional[EngineKind] = None + runtime_kind: Optional[RuntimeKind] = 
None + created_by: str + created_at: datetime + updated_at: datetime + + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/agent_release.py b/backend/app/schemas/agent_release.py new file mode 100644 index 000000000..ece5e6832 --- /dev/null +++ b/backend/app/schemas/agent_release.py @@ -0,0 +1,55 @@ +""" +Pydantic schemas for AgentRelease API. +""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel + +from app.core.contracts.agent import RuntimeKind +from app.core.contracts.execution import ReleaseStatusLiteral + +# --------------------------------------------------------------------------- +# Request schemas +# --------------------------------------------------------------------------- + + +class CreateAgentReleaseRequest(BaseModel): + agent_version_id: uuid.UUID + runtime_kind: RuntimeKind + builder_kind: Optional[str] = None + runtime_binding: dict = {} + + +# --------------------------------------------------------------------------- +# Response schemas +# --------------------------------------------------------------------------- + + +class AgentReleaseSummary(BaseModel): + id: uuid.UUID + release_number: int + status: ReleaseStatusLiteral + runtime_kind: str + + model_config = {"from_attributes": True} + + +class AgentReleaseResponse(BaseModel): + id: uuid.UUID + agent_version_id: uuid.UUID + release_number: int + status: ReleaseStatusLiteral + runtime_kind: str + builder_kind: Optional[str] = None + executable_ref: Optional[dict] = None + runtime_binding: dict + published_by: Optional[str] = None + published_at: Optional[datetime] = None + retired_at: Optional[datetime] = None + + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/agent_run.py b/backend/app/schemas/agent_run.py new file mode 100644 index 000000000..b5db34cd4 --- /dev/null +++ b/backend/app/schemas/agent_run.py @@ -0,0 +1,53 @@ +""" +Pydantic schemas for AgentRun API. 
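The request schemas above are Pydantic v2 models, so they validate straight from untrusted JSON and apply the defaults shown in the diff. A small sketch with made-up payload values:

```python
from app.schemas.agent import CreateAgentRequest, UpdateAgentRequest

payload = {"name": "Support Bot", "description": "Answers tickets"}
req = CreateAgentRequest.model_validate(payload)
assert req.engine_kind == "langgraph_visual"  # schema default kicks in

patch = UpdateAgentRequest(status="archived")  # checked against AgentStatusLiteral
print(req.model_dump(exclude_none=True))
print(patch.model_dump(exclude_none=True))
```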
+""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel + +from app.core.contracts.execution import RunPurposeLiteral, TriggerMediumLiteral + + +class CreateAgentRunRequest(BaseModel): + release_id: uuid.UUID + thread_id: Optional[uuid.UUID] = None + task_id: Optional[uuid.UUID] = None + trigger_medium: TriggerMediumLiteral + run_purpose: RunPurposeLiteral + goal: Optional[str] = None + input_payload: Optional[dict] = None + + +class CreateDraftAgentRunRequest(BaseModel): + agent_id: uuid.UUID + version_id: uuid.UUID + workspace_id: uuid.UUID + goal: Optional[str] = None + input_payload: Optional[dict] = None + + +class AgentRunResponse(BaseModel): + id: uuid.UUID + release_id: Optional[uuid.UUID] + agent_version_id: Optional[uuid.UUID] = None + workspace_id: uuid.UUID + thread_id: Optional[uuid.UUID] + task_id: Optional[uuid.UUID] + trigger_medium: str + run_purpose: str + goal: Optional[str] + input_payload: Optional[dict] + status: str + current_execution_id: Optional[uuid.UUID] + result_summary: Optional[str] + started_at: Optional[datetime] + ended_at: Optional[datetime] + created_by: Optional[str] + created_at: datetime + + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/agent_version.py b/backend/app/schemas/agent_version.py new file mode 100644 index 000000000..0126b2910 --- /dev/null +++ b/backend/app/schemas/agent_version.py @@ -0,0 +1,61 @@ +""" +Pydantic schemas for AgentVersion API. +""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Any, Dict, Optional + +from pydantic import BaseModel + +from app.core.contracts.agent import EngineKind + +# --------------------------------------------------------------------------- +# Request schemas +# --------------------------------------------------------------------------- + + +class CreateAgentVersionRequest(BaseModel): + source_kind: Optional[str] = "manual" + engine_kind: EngineKind = "langgraph_visual" + definition_payload: Optional[Dict[str, Any]] = None + capability_manifest: Optional[Dict[str, Any]] = None + changelog: Optional[str] = None + + +class UpdateAgentVersionRequest(BaseModel): + definition_payload: Optional[Dict[str, Any]] = None + capability_manifest: Optional[Dict[str, Any]] = None + changelog: Optional[str] = None + + +# --------------------------------------------------------------------------- +# Response schemas +# --------------------------------------------------------------------------- + + +class AgentVersionSummary(BaseModel): + id: uuid.UUID + version_number: int + status: str + engine_kind: str + + model_config = {"from_attributes": True} + + +class AgentVersionResponse(BaseModel): + id: uuid.UUID + agent_id: uuid.UUID + version_number: int + status: str + source_kind: str + engine_kind: str + definition_payload: Dict[str, Any] + capability_manifest: Dict[str, Any] + changelog: Optional[str] = None + created_by: str + created_at: datetime + + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/artifact.py b/backend/app/schemas/artifact.py new file mode 100644 index 000000000..306042985 --- /dev/null +++ b/backend/app/schemas/artifact.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel + + +class ArtifactResponse(BaseModel): + id: uuid.UUID + execution_id: uuid.UUID + kind: str + uri: str + metadata: 
Optional[dict] = None + created_at: datetime + model_config = {"from_attributes": True} diff --git a/backend/app/schemas/common.py b/backend/app/schemas/common.py index fef124f74..decb96766 100644 --- a/backend/app/schemas/common.py +++ b/backend/app/schemas/common.py @@ -4,11 +4,9 @@ import uuid from datetime import datetime -from typing import Any, Generic, List, Optional, TypeVar +from typing import Generic, List, TypeVar -from pydantic import BaseModel, ConfigDict, Field - -from app.schemas.base import BaseResponse +from pydantic import BaseModel, ConfigDict T = TypeVar("T") @@ -40,34 +38,3 @@ class IDSchema(TimestampSchema): """Schema with ID.""" id: uuid.UUID - - -# Session schemas -class SessionCreate(BaseModel): - """Create session request.""" - - title: Optional[str] = "New Session" - workspace_path: Optional[str] = None - - -class SessionResponse(BaseResponse): - """Session response.""" - - session_id: str - title: str - workspace_path: str - is_active: bool - created_at: datetime - updated_at: datetime - message_count: int = 0 - - -class SessionMessageResponse(BaseModel): - """Session message item (legacy sessions API).""" - - id: uuid.UUID - session_id: str - content: str - role: str - metadata: dict[str, Any] = Field(default_factory=dict) - created_at: datetime diff --git a/backend/app/schemas/conversation.py b/backend/app/schemas/conversation.py deleted file mode 100644 index 3a5e1da40..000000000 --- a/backend/app/schemas/conversation.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Conversation Pydantic schemas - -Request and response validation for conversation management. -""" - -import uuid -from datetime import datetime -from typing import Any - -from pydantic import BaseModel, ConfigDict, Field - -from app.schemas.validators import EnhancedBaseModel - - -class ConversationCreate(EnhancedBaseModel): - """Create conversation request.""" - - # user_id is obtained from authentication; no longer needed in the request - title: str = Field(default="New Conversation", min_length=1, max_length=200, description="conversation title") - metadata: dict[str, Any] = Field(default_factory=dict, description="metadata") - - -class ConversationUpdate(EnhancedBaseModel): - """Update conversation request.""" - - title: str | None = Field(None, min_length=1, max_length=200, description="conversation title") - metadata: dict[str, Any] | None = Field(None, description="metadata") - - -class ConversationResponse(BaseModel): - """Conversation response.""" - - model_config = ConfigDict(from_attributes=True) - - id: uuid.UUID = Field(..., description="conversation ID") - thread_id: str = Field(..., description="thread ID") - user_id: str = Field(..., description="user ID (text)") - title: str = Field(..., description="conversation title") - metadata: dict[str, Any] = Field(default_factory=dict, description="metadata") - created_at: datetime = Field(..., description="creation time") - updated_at: datetime = Field(..., description="update time") - message_count: int = Field(default=0, description="message count") - - -class ConversationDetailResponse(BaseModel): - """Conversation detail response.""" - - conversation: ConversationResponse - messages: list[dict[str, Any]] = Field(default_factory=list, description="message list") - - -class ConversationExportResponse(BaseModel): - """Conversation export response.""" - - conversation: dict[str, Any] - messages: list[dict[str, Any]] - state: dict[str, Any] | None = None - - -class ConversationImportRequest(BaseModel): - """Conversation import request.""" - - # 
user_id is obtained from authentication; no longer needed in the request - data: dict[str, Any] = Field(..., description="import data") - - -class CheckpointResponse(BaseModel): - """Checkpoint response.""" - - thread_id: str - checkpoints: list[dict[str, Any]] - - -class SearchRequest(BaseModel): - """Search request.""" - - # user_id is obtained from authentication; no longer needed in the request - query: str = Field(..., description="search keyword") - skip: int = Field(default=0, ge=0, description="number to skip") - limit: int = Field(default=20, ge=1, le=100, description="number to return") - - -class SearchResponse(BaseModel): - """Search response.""" - - query: str - results: list[dict[str, Any]] - - -class UserStatsResponse(BaseModel): - """User statistics response.""" - - user_id: str - total_conversations: int - total_messages: int - recent_conversations: list[dict[str, Any]] - - -class ConversationMessageResponse(BaseModel): - """Conversation message response.""" - - model_config = ConfigDict(from_attributes=True) - - id: uuid.UUID = Field(..., description="message ID") - role: str = Field(..., description="message role") - content: str = Field(..., description="message content") - metadata: dict[str, Any] = Field(default_factory=dict, description="metadata") - created_at: datetime = Field(..., description="creation time") diff --git a/backend/app/schemas/copilot.py b/backend/app/schemas/copilot.py new file mode 100644 index 000000000..879093aa3 --- /dev/null +++ b/backend/app/schemas/copilot.py @@ -0,0 +1,23 @@ +import uuid +from typing import Any, Optional + +from pydantic import BaseModel, Field + + +class CopilotRunRequest(BaseModel): + """Dispatch a copilot interaction through the execution engine.""" + + agent_id: uuid.UUID + version_id: uuid.UUID + workspace_id: uuid.UUID + prompt: str + graph_context: dict[str, Any] + conversation_history: list[dict[str, Any]] = Field(default_factory=list) + mode: str = "deepagents" + provider_name: Optional[str] = None + model_name: Optional[str] = None + + +class CopilotRunResponse(BaseModel): + run_id: str + execution_id: str diff --git a/backend/app/schemas/execution.py b/backend/app/schemas/execution.py new file mode 100644 index 000000000..99cb5be29 --- /dev/null +++ b/backend/app/schemas/execution.py @@ -0,0 +1,79 @@ +""" +Pydantic schemas for Execution APIs. 
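+
+Event pages are cursor-based. A client-side sketch (``resp`` and ``handle``
+are placeholders, not part of this module)::
+
+    page = ExecutionEventsPageResponse.model_validate(resp.json())
+    for event in page.events:
+        handle(event.event_type, event.payload)
+    # Request the next page with after_seq=<page.next_after_seq>.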
+""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel + + +class AppErrorPayload(BaseModel): + code: str + message: str + data: dict | None = None + source: str | None = None + retryable: bool = False + user_action: str | None = None + detail: str | None = None + + +# --------------------------------------------------------------------------- +# Execution (Phase 4 - new schema) +# --------------------------------------------------------------------------- + + +class ExecutionResponse(BaseModel): + id: uuid.UUID + run_id: uuid.UUID + parent_execution_id: Optional[uuid.UUID] + attempt_index: int + engine_kind: str + runtime_session_ref: Optional[str] + status: str + error: Optional[AppErrorPayload] + metrics: Optional[dict] + started_at: Optional[datetime] + ended_at: Optional[datetime] + created_at: datetime + + model_config = {"from_attributes": True} + + +class ExecutionEventResponse(BaseModel): + id: uuid.UUID + execution_id: uuid.UUID + sequence_no: int + event_type: str + payload: dict + created_at: datetime + + model_config = {"from_attributes": True} + + +class ExecutionEventItemResponse(BaseModel): + id: uuid.UUID + execution_id: uuid.UUID + seq: int + event_type: str + payload: dict + created_at: datetime + + +class ExecutionEventsPageResponse(BaseModel): + execution_id: uuid.UUID + events: list[ExecutionEventItemResponse] + next_after_seq: int + + +# --------------------------------------------------------------------------- +# Intervention / Approval +# --------------------------------------------------------------------------- + + +class ApproveActionRequest(BaseModel): + approved: bool + message: str | None = None diff --git a/backend/app/schemas/graph_deployment_version.py b/backend/app/schemas/graph_deployment_version.py deleted file mode 100644 index 6c26d2593..000000000 --- a/backend/app/schemas/graph_deployment_version.py +++ /dev/null @@ -1,95 +0,0 @@ -""" -Graph deployment version schemas -""" - -import uuid -from datetime import datetime -from typing import List, Optional - -from pydantic import BaseModel, Field - - -class GraphDeploymentVersionResponse(BaseModel): - """Graph deployment version response.""" - - id: uuid.UUID - version: int - name: Optional[str] = None - is_active: bool - created_at: datetime - created_by: Optional[str] = None - - class Config: - from_attributes = True - - -class GraphDeploymentVersionResponseCamel(BaseModel): - """Graph deployment version response -- camelCase field names.""" - - id: str - version: int - name: Optional[str] = None - isActive: bool - createdAt: str - createdBy: Optional[str] = None - createdByName: Optional[str] = None # creator username - - class Config: - from_attributes = True - - -class GraphDeploymentVersionStateResponse(BaseModel): - """Graph deployment version state response -- includes full nodes, edges, etc.""" - - id: str - version: int - name: Optional[str] = None - isActive: bool - createdAt: str - createdBy: Optional[str] = None - # full graph state; frontend can use this for preview - state: dict = Field(default_factory=dict, description="full version state (nodes, edges, variables)") - - class Config: - from_attributes = True - - -class GraphDeploymentVersionListResponse(BaseModel): - """Graph deployment version list response (paginated).""" - - versions: List[GraphDeploymentVersionResponseCamel] - total: int - page: int = Field(default=1, description="current page") - pageSize: int = Field(default=10, 
description="page size") - totalPages: int = Field(default=1, description="total pages") - - -class GraphDeployRequest(BaseModel): - """Deploy graph request.""" - - name: Optional[str] = Field(None, description="version name (optional)") - - -class GraphDeployResponse(BaseModel): - """Deploy graph response.""" - - success: bool - message: str - version: int - isActive: bool - needsRedeployment: bool = Field(default=False, description="whether redeployment is needed") - - -class GraphRevertResponse(BaseModel): - """Revert version response.""" - - success: bool - message: str - version: int - is_active: bool - - -class GraphRenameVersionRequest(BaseModel): - """Rename version request.""" - - name: str = Field(..., min_length=1, max_length=255, description="new version name") diff --git a/backend/app/schemas/runs.py b/backend/app/schemas/runs.py deleted file mode 100644 index 36a97c9da..000000000 --- a/backend/app/schemas/runs.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -Schemas for run APIs. -""" - -import uuid -from datetime import datetime -from typing import Any, Optional - -from pydantic import BaseModel, Field - - -class CreateSkillCreatorRunRequest(BaseModel): - message: str = Field(..., description="Initial user prompt") - graph_id: uuid.UUID = Field(..., description="Skill Creator graph id") - thread_id: Optional[str] = Field(None, description="Existing thread id") - edit_skill_id: Optional[str] = Field(None, description="Existing skill id when editing") - - -class CreateRunRequest(BaseModel): - agent_name: str = Field(..., description="Registered agent name") - graph_id: uuid.UUID = Field(..., description="Graph id") - message: str = Field(..., description="Initial user prompt") - thread_id: Optional[str] = Field(None, description="Existing thread id") - input: Optional[dict[str, Any]] = Field(None, description="Agent-specific input payload") - - -class RunSummary(BaseModel): - run_id: uuid.UUID - status: str - run_type: str - agent_name: str - agent_display_name: Optional[str] = None - source: str - thread_id: Optional[str] = None - graph_id: Optional[uuid.UUID] = None - title: Optional[str] = None - started_at: datetime - finished_at: Optional[datetime] = None - last_seq: int - error_code: Optional[str] = None - error_message: Optional[str] = None - last_heartbeat_at: Optional[datetime] = None - updated_at: datetime - - -class CreateRunResponse(BaseModel): - run_id: uuid.UUID - thread_id: str - status: str - - -class RunSnapshotResponse(BaseModel): - run_id: uuid.UUID - status: str - last_seq: int - projection: dict[str, Any] - - -class RunEventResponse(BaseModel): - seq: int - event_type: str - payload: dict[str, Any] - trace_id: Optional[uuid.UUID] = None - observation_id: Optional[uuid.UUID] = None - parent_observation_id: Optional[uuid.UUID] = None - created_at: datetime - - -class RunEventsPageResponse(BaseModel): - run_id: uuid.UUID - events: list[RunEventResponse] - next_after_seq: int - - -class RunListResponse(BaseModel): - items: list[RunSummary] - - -class AgentDefinitionResponse(BaseModel): - agent_name: str - display_name: str - - -class AgentListResponse(BaseModel): - items: list[AgentDefinitionResponse] diff --git a/backend/app/schemas/task.py b/backend/app/schemas/task.py new file mode 100644 index 000000000..4d418b3c4 --- /dev/null +++ b/backend/app/schemas/task.py @@ -0,0 +1,91 @@ +""" +Pydantic schemas for Task and Execution APIs. 
+""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Any, Literal, Optional + +from pydantic import BaseModel, Field + +# --------------------------------------------------------------------------- +# Task +# --------------------------------------------------------------------------- + +TaskStatusLiteral = Literal["backlog", "todo", "in_progress", "done", "in_review", "cancelled"] +TaskPriorityLiteral = Literal["none", "low", "medium", "high", "urgent"] + + +class CreateTaskRequest(BaseModel): + workspace_id: uuid.UUID + title: str = Field(..., max_length=500) + description: Optional[str] = None + goal: Optional[str] = None + priority: TaskPriorityLiteral = "none" + agent_id: Optional[uuid.UUID] = None + parent_task_id: Optional[uuid.UUID] = None + tags: Optional[list[str]] = None + position: float = 0.0 + auto_approve: bool = False + + +class UpdateTaskRequest(BaseModel): + title: Optional[str] = Field(None, max_length=500) + description: Optional[str] = None + goal: Optional[str] = None + priority: Optional[TaskPriorityLiteral] = None + status: Optional[TaskStatusLiteral] = None + agent_id: Optional[uuid.UUID] = None + parent_task_id: Optional[uuid.UUID] = None + due_date: Optional[datetime] = None + position: Optional[float] = None + tags: Optional[list[str]] = None + auto_approve: Optional[bool] = None + + +class AssignTaskRequest(BaseModel): + agent_id: uuid.UUID + + +class DispatchTaskRequest(BaseModel): + runtime_config: Optional[dict[str, Any]] = None + + +class TaskSummary(BaseModel): + id: uuid.UUID + workspace_id: uuid.UUID + title: str + description: Optional[str] = None + goal: Optional[str] = None + status: str + priority: str + agent_id: Optional[uuid.UUID] = None + creator_id: str + latest_run_id: Optional[uuid.UUID] = None + parent_task_id: Optional[uuid.UUID] = None + tags: Optional[list[str]] = None + position: float + auto_approve: bool = False + due_date: Optional[datetime] = None + created_at: datetime + updated_at: datetime + + +class TaskListResponse(BaseModel): + items: list[TaskSummary] + + +# --------------------------------------------------------------------------- +# Intervention / Approval +# --------------------------------------------------------------------------- + + +class InjectMessageRequest(BaseModel): + message: str + + +class ApproveActionRequest(BaseModel): + approved: bool + message: str | None = None diff --git a/backend/app/schemas/task_activity.py b/backend/app/schemas/task_activity.py new file mode 100644 index 000000000..aa5e65023 --- /dev/null +++ b/backend/app/schemas/task_activity.py @@ -0,0 +1,41 @@ +""" +Pydantic schemas for Task Activities. 
+""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional + +from pydantic import BaseModel, ConfigDict, Field + + +class CreateTaskActivityRequest(BaseModel): + content: str = Field(..., min_length=1, max_length=10000) + parent_activity_id: Optional[uuid.UUID] = None + + +class UpdateTaskActivityRequest(BaseModel): + content: str = Field(..., min_length=1, max_length=10000) + + +class TaskActivityResponse(BaseModel): + model_config = ConfigDict(from_attributes=True) + + id: uuid.UUID + task_id: uuid.UUID + workspace_id: uuid.UUID + author_type: str + author_id: str + content: str + type: str + parent_activity_id: Optional[uuid.UUID] = None + created_at: datetime + updated_at: datetime + + +class TaskActivityListResponse(BaseModel): + items: list[TaskActivityResponse] + has_more: bool = False + next_cursor: Optional[str] = None diff --git a/backend/app/schemas/thread.py b/backend/app/schemas/thread.py new file mode 100644 index 000000000..0b8f9d707 --- /dev/null +++ b/backend/app/schemas/thread.py @@ -0,0 +1,98 @@ +""" +Pydantic schemas for Thread API. +""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Any, Dict, List, Literal, Optional + +from pydantic import BaseModel, Field + +# --------------------------------------------------------------------------- +# Literals +# --------------------------------------------------------------------------- + +ThreadStatusLiteral = Literal["active", "archived"] + +# --------------------------------------------------------------------------- +# Request schemas +# --------------------------------------------------------------------------- + + +class CreateThreadRequest(BaseModel): + agent_id: uuid.UUID + title: Optional[str] = Field(None, max_length=500) + + +class UpdateThreadRequest(BaseModel): + title: Optional[str] = Field(None, max_length=500) + status: Optional[ThreadStatusLiteral] = None + + +class ChatAttachment(BaseModel): + """A file attachment sent alongside a chat message.""" + + filename: str = Field(..., min_length=1, max_length=255) + storage_ref: str = Field( + ..., min_length=1, max_length=500, description="Sandbox path from /v1/files/upload response" + ) + mime_type: str = Field(..., min_length=1, max_length=100) + size_bytes: int = Field(..., gt=0) + + +class ChatRequest(BaseModel): + message: str = Field(..., min_length=1, max_length=10000) + attachments: Optional[List[ChatAttachment]] = Field(None, max_length=10, description="Up to 10 file attachments") + + +# --------------------------------------------------------------------------- +# Response schemas +# --------------------------------------------------------------------------- + + +class ThreadSummary(BaseModel): + id: uuid.UUID + agent_id: uuid.UUID + title: Optional[str] = None + status: str + created_at: datetime + + model_config = {"from_attributes": True} + + +class ThreadResponse(BaseModel): + id: uuid.UUID + agent_id: uuid.UUID + workspace_id: uuid.UUID + title: Optional[str] = None + status: str + created_by: str + created_at: datetime + updated_at: datetime + + model_config = {"from_attributes": True} + + +class ChatResponse(BaseModel): + run_id: uuid.UUID + execution_id: uuid.UUID + + +class ThreadEventResponse(BaseModel): + id: uuid.UUID + run_id: uuid.UUID + execution_id: uuid.UUID + sequence_no: int + event_type: str + payload: Dict[str, Any] + execution_status: str + created_at: datetime + + model_config = {"from_attributes": True} + + +class 
ThreadEventsListResponse(BaseModel): + events: list[ThreadEventResponse] + total: int diff --git a/backend/app/schemas/validators.py b/backend/app/schemas/validators.py index 09e7433c8..4eb951c64 100644 --- a/backend/app/schemas/validators.py +++ b/backend/app/schemas/validators.py @@ -146,16 +146,20 @@ def validate_max_tokens(tokens: int) -> int: def create_validation_error_response(errors: list) -> dict[str, Any]: """Create a validation error response.""" return { - "success": False, - "code": 422, + "code": "REQUEST_VALIDATION_ERROR", "message": "Request parameter validation failed", - "errors": [ - ValidationErrorDetail( - field=error["loc"][0] if error["loc"] else "unknown", - message=error["msg"], - value=error.get("input"), - type=error["type"], - ).model_dump() - for error in errors - ], + "data": { + "errors": [ + ValidationErrorDetail( + field=error["loc"][0] if error["loc"] else "unknown", + message=error["msg"], + value=error.get("input"), + type=error["type"], + ).model_dump() + for error in errors + ] + }, + "source": "validation", + "retryable": False, + "user_action": "fix_input", } diff --git a/backend/app/services/MODEL.md b/backend/app/services/MODEL.md index ab09fa633..0229bfeca 100644 --- a/backend/app/services/MODEL.md +++ b/backend/app/services/MODEL.md @@ -325,7 +325,7 @@ Service 负责事务管理,通过 `commit()` 方法提交事务。 ### 4. 错误处理 -Service 层处理业务逻辑错误,抛出适当的异常(如 `NotFoundException`、`BadRequestException`)。 +Service 层处理业务逻辑错误,抛出适当的异常(如 `NotFoundError`、`InvalidRequestError`)。 ## 数据流 diff --git a/backend/app/services/__init__.py b/backend/app/services/__init__.py index c3cf97a3a..90926e23c 100644 --- a/backend/app/services/__init__.py +++ b/backend/app/services/__init__.py @@ -3,7 +3,6 @@ """ from .base import BaseService -from .graph_deployment_version_service import GraphDeploymentVersionService from .mcp_client_service import McpClientService, McpConnectionConfig, get_mcp_client from .mcp_server_service import McpServerService from .tool_service import ToolService, initialize_mcp_tools_on_startup @@ -17,6 +16,4 @@ "McpConnectionConfig", "get_mcp_client", "initialize_mcp_tools_on_startup", - # graph deployment version service - "GraphDeploymentVersionService", ] diff --git a/backend/app/services/agent_publish_service.py b/backend/app/services/agent_publish_service.py new file mode 100644 index 000000000..bb63e8bce --- /dev/null +++ b/backend/app/services/agent_publish_service.py @@ -0,0 +1,96 @@ +""" +AgentPublishService — high-level publish/rollback/retire orchestration. + +All sub-service calls share the same AsyncSession. Only this service +calls commit — sub-services only flush. 
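+
+Sketch of the happy path (freeze the draft, publish a release, activate it,
+then fork a fresh draft for the editor)::
+
+    svc = AgentPublishService(db)  # db: AsyncSession
+    result = await svc.publish(agent_id, user_id)
+    agent, release = result["agent"], result["release"]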
+""" + +from __future__ import annotations + +import uuid + +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InvalidRequestError, NotFoundError +from app.core.contracts.agent import infer_runtime_kind +from app.models.agent import AgentVersion +from app.repositories.agent import AgentRepository, AgentVersionRepository +from app.schemas.agent_release import CreateAgentReleaseRequest +from app.services.agent_release_service import AgentReleaseService +from app.services.agent_version_service import AgentVersionService + +from .base import BaseService + + +class AgentPublishService(BaseService): + def __init__(self, db: AsyncSession): + super().__init__(db) + self.version_svc = AgentVersionService(db) + self.release_svc = AgentReleaseService(db) + self.agent_repo = AgentRepository(db) + self.version_repo = AgentVersionRepository(db) + + async def publish(self, agent_id: uuid.UUID, user_id: str) -> dict: + agent = await self.agent_repo.get(agent_id) + if not agent: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + + version = await self._resolve_current_draft(agent) + + if version.status == "draft": + await self.version_svc.freeze_version(version.id) + + runtime_kind = infer_runtime_kind(version.engine_kind) + release_data = CreateAgentReleaseRequest( + agent_version_id=version.id, + runtime_kind=runtime_kind, # type: ignore[arg-type] + runtime_binding={}, + ) + release = await self.release_svc.publish_release(agent_id, user_id, release_data) + + await self.release_svc.activate_release(agent_id, release.id) + + # Fork a fresh draft so the editor has a writable version after publish. + await self.version_svc.fork_draft_from(version, user_id) + + await self.safe_commit() + reloaded_agent = await self.agent_repo.get( + agent_id, + relations=["current_draft_version", "active_release"], + ) + return {"agent": reloaded_agent or agent, "release": release} + + async def rollback(self, agent_id: uuid.UUID, release_id: uuid.UUID) -> dict: # type: ignore[override] + await self.release_svc.activate_release(agent_id, release_id) + await self.safe_commit() + agent = await self.agent_repo.get( + agent_id, + relations=["current_draft_version", "active_release"], + ) + return {"agent": agent} + + async def unpublish(self, agent_id: uuid.UUID) -> dict: + release = await self.release_svc.unpublish_release(agent_id) + await self.safe_commit() + agent = await self.agent_repo.get( + agent_id, + relations=["current_draft_version", "active_release"], + ) + return {"agent": agent, "release": release} + + async def retire(self, agent_id: uuid.UUID, release_id: uuid.UUID) -> dict: + release = await self.release_svc.retire_release(agent_id, release_id) + await self.safe_commit() + return {"release": release} + + async def _resolve_current_draft(self, agent) -> AgentVersion: + if not agent.current_draft_version_id: + raise InvalidRequestError("Agent has no draft version", code="AGENT_DRAFT_VERSION_MISSING") + version = await self.version_repo.get(agent.current_draft_version_id) + if not version: + raise NotFoundError( + "Draft version not found", + code="AGENT_DRAFT_VERSION_NOT_FOUND", + data={"version_id": str(agent.current_draft_version_id)}, + ) + return version diff --git a/backend/app/services/agent_registry.py b/backend/app/services/agent_registry.py deleted file mode 100644 index c85fd755f..000000000 --- a/backend/app/services/agent_registry.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Registry for long-running agent run definitions.""" 
- -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any, Callable - - -@dataclass(frozen=True) -class AgentDefinition: - agent_name: str - display_name: str - run_type: str - reducer: Callable[..., dict[str, Any]] - make_initial_projection: Callable[[dict[str, Any], str], dict[str, Any]] - - -class AgentRegistry: - def __init__(self) -> None: - self._definitions: dict[str, AgentDefinition] = {} - self._bootstrapped = False - - def _ensure_loaded(self) -> None: - if self._bootstrapped: - return - self._bootstrapped = True - from app.services import run_reducers # noqa: F401 - - def register(self, definition: AgentDefinition) -> AgentDefinition: - self._definitions[definition.agent_name] = definition - return definition - - def get(self, agent_name: str) -> AgentDefinition: - self._ensure_loaded() - definition = self.find(agent_name) - if definition is None: - raise KeyError(f"Unknown agent definition: {agent_name}") - return definition - - def find(self, agent_name: str | None) -> AgentDefinition | None: - self._ensure_loaded() - if not agent_name: - return None - return self._definitions.get(agent_name) - - def list_definitions(self) -> list[AgentDefinition]: - self._ensure_loaded() - return sorted(self._definitions.values(), key=lambda definition: definition.display_name.lower()) - - -agent_registry = AgentRegistry() diff --git a/backend/app/services/agent_release_service.py b/backend/app/services/agent_release_service.py new file mode 100644 index 000000000..de43e533d --- /dev/null +++ b/backend/app/services/agent_release_service.py @@ -0,0 +1,186 @@ +""" +AgentReleaseService — manages AgentRelease lifecycle. +""" + +from __future__ import annotations + +import uuid +from typing import List + +from loguru import logger +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InvalidRequestError, NotFoundError +from app.core.state_machines import AGENT_SM, RELEASE_SM +from app.models.agent import AgentRelease +from app.repositories.agent import AgentRepository, AgentVersionRepository +from app.repositories.agent_release import AgentReleaseRepository +from app.schemas.agent_release import CreateAgentReleaseRequest +from app.utils.datetime import utc_now + +from .base import BaseService + + +class AgentReleaseService(BaseService): + """Manages AgentRelease entities.""" + + def __init__(self, db: AsyncSession): + super().__init__(db) + self.release_repo = AgentReleaseRepository(db) + self.version_repo = AgentVersionRepository(db) + self.agent_repo = AgentRepository(db) + + async def list_releases(self, agent_id: uuid.UUID) -> List[AgentRelease]: + return await self.release_repo.list_by_agent(agent_id) + + async def get_release(self, release_id: uuid.UUID) -> AgentRelease: + release = await self.release_repo.get(release_id) + if not release: + raise NotFoundError( + "Agent release not found", + code="AGENT_RELEASE_NOT_FOUND", + data={"release_id": str(release_id)}, + ) + return release + + async def _get_release_for_agent(self, agent_id: uuid.UUID, release_id: uuid.UUID) -> AgentRelease: + release = await self.release_repo.get(release_id, relations=["version"]) + if not release: + raise NotFoundError( + "Agent release not found", + code="AGENT_RELEASE_NOT_FOUND", + data={"release_id": str(release_id)}, + ) + if release.version.agent_id != agent_id: + raise InvalidRequestError( + "Release does not belong to this agent", + code="AGENT_RELEASE_AGENT_MISMATCH", + data={"agent_id": str(agent_id), "release_id": 
str(release_id)}, + ) + return release + + async def publish_release( + self, + agent_id: uuid.UUID, + user_id: str, + data: CreateAgentReleaseRequest, + ) -> AgentRelease: + # Verify the version exists and belongs to this agent + version = await self.version_repo.get(data.agent_version_id) + if not version: + raise NotFoundError( + "Agent version not found", + code="AGENT_VERSION_NOT_FOUND", + data={"version_id": str(data.agent_version_id)}, + ) + if version.agent_id != agent_id: + raise InvalidRequestError( + "Version does not belong to this agent", + code="AGENT_VERSION_AGENT_MISMATCH", + data={"agent_id": str(agent_id), "version_id": str(data.agent_version_id)}, + ) + if version.status != "frozen": + raise InvalidRequestError( + "Version must be frozen before publishing a release", + code="AGENT_VERSION_NOT_FROZEN", + data={"version_id": str(data.agent_version_id), "status": version.status}, + ) + + # Auto-increment release_number per agent_version_id + max_num = await self.release_repo.get_max_release_number(data.agent_version_id) + next_num = max_num + 1 + + release = await self.release_repo.create( + { + "agent_version_id": data.agent_version_id, + "release_number": next_num, + "status": "ready", + "runtime_kind": data.runtime_kind, + "builder_kind": data.builder_kind, + "runtime_binding": data.runtime_binding, + "published_by": user_id, + "published_at": utc_now(), + } + ) + + logger.info(f"Published release {release.id} (r{next_num}) for agent {agent_id}") + return release + + async def _supersede_current_active(self, agent) -> None: + """Transition the agent's currently active release to 'superseded'.""" + if not agent.active_release_id: + return + current = await self.release_repo.get(agent.active_release_id) + if current and current.status == "active": + RELEASE_SM.validate(current.status, "superseded") + await self.release_repo.update(current.id, {"status": "superseded"}) + + async def activate_release(self, agent_id: uuid.UUID, release_id: uuid.UUID) -> AgentRelease: + release = await self._get_release_for_agent(agent_id, release_id) + if release.status not in ("ready", "superseded"): + raise InvalidRequestError( + "Only 'ready' or 'superseded' releases can be activated", + code="AGENT_RELEASE_NOT_ACTIVATABLE", + data={"release_id": str(release_id), "status": release.status}, + ) + + agent = await self.agent_repo.get(agent_id) + if not agent: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + + if agent.active_release_id != release_id: + await self._supersede_current_active(agent) + + RELEASE_SM.validate(release.status, "active") + updated = await self.release_repo.update(release_id, {"status": "active"}) + assert updated is not None + + update_data: dict = {"active_release_id": release_id} + if agent.status != "active": + AGENT_SM.validate(agent.status, "active") + update_data["status"] = "active" + await self.agent_repo.update(agent_id, update_data) + logger.info(f"Activated release {release_id} for agent {agent_id}") + return updated + + async def retire_release(self, agent_id: uuid.UUID, release_id: uuid.UUID) -> AgentRelease: + release = await self._get_release_for_agent(agent_id, release_id) + + if release.status == "retired": + return release + if release.status == "active": + raise InvalidRequestError( + "Cannot retire an active release; unpublish first", + code="AGENT_RELEASE_ACTIVE_CANNOT_RETIRE", + data={"release_id": str(release_id)}, + ) + + RELEASE_SM.validate(release.status, "retired") + updated = await 
self.release_repo.update(release_id, {"status": "retired", "retired_at": utc_now()}) + assert updated is not None + + logger.info(f"Retired release {release_id} for agent {agent_id}") + return updated + + async def unpublish_release(self, agent_id: uuid.UUID) -> AgentRelease | None: + agent = await self.agent_repo.get(agent_id) + if not agent: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + if not agent.active_release_id: + raise InvalidRequestError( + "Agent has no active release to unpublish", + code="AGENT_NOT_PUBLISHED", + data={"agent_id": str(agent_id)}, + ) + + await self._supersede_current_active(agent) + release = await self.release_repo.get(agent.active_release_id) + + update_data: dict = {"active_release_id": None} + if agent.status == "active": + AGENT_SM.validate(agent.status, "draft") + update_data["status"] = "draft" + await self.agent_repo.update(agent_id, update_data) + + logger.info(f"Unpublished agent {agent_id}") + return release diff --git a/backend/app/services/agent_run_service.py b/backend/app/services/agent_run_service.py new file mode 100644 index 000000000..19a70c3fb --- /dev/null +++ b/backend/app/services/agent_run_service.py @@ -0,0 +1,81 @@ +""" +AgentRunService — read-only queries for AgentRun entities. + +All mutations (create / cancel / retry) go through ExecutionOrchestrator +which publishes events through the EventBus. +""" + +from __future__ import annotations + +import uuid +from typing import List, Optional + +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InvalidRequestError, NotFoundError +from app.models.agent_run import AgentRun +from app.repositories.agent_run import AgentRunRepository + + +class AgentRunService: + """Read-only queries for AgentRun entities.""" + + def __init__(self, db: AsyncSession): + self.db = db + self.run_repo = AgentRunRepository(db) + + async def list_runs( + self, + workspace_id: Optional[uuid.UUID] = None, + release_id: Optional[uuid.UUID] = None, + task_id: Optional[uuid.UUID] = None, + agent_id: Optional[uuid.UUID] = None, + trigger_medium: Optional[str] = None, + run_purpose: Optional[str] = None, + status: Optional[str] = None, + ) -> List[AgentRun]: + """List runs filtered by parameters.""" + if agent_id: + if not workspace_id: + raise InvalidRequestError( + "workspace_id is required when filtering by agent_id", + code="AGENT_RUN_WORKSPACE_REQUIRED", + data={"filter": "agent_id"}, + ) + return await self.run_repo.find_by_agent_and_trigger( + agent_id=agent_id, + workspace_id=workspace_id, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + status=status, + ) + elif task_id: + if not workspace_id: + raise InvalidRequestError( + "workspace_id is required when filtering by task_id", + code="AGENT_RUN_WORKSPACE_REQUIRED", + data={"filter": "task_id"}, + ) + return await self.run_repo.list_by_task(task_id, workspace_id) + elif release_id: + if not workspace_id: + raise InvalidRequestError( + "workspace_id is required when filtering by release_id", + code="AGENT_RUN_WORKSPACE_REQUIRED", + data={"filter": "release_id"}, + ) + return await self.run_repo.list_by_release(release_id, workspace_id) + elif workspace_id: + return await self.run_repo.list_by_workspace(workspace_id) + else: + raise InvalidRequestError( + "Must provide workspace_id, release_id, task_id, or agent_id", + code="AGENT_RUN_FILTER_REQUIRED", + ) + + async def get_run(self, run_id: uuid.UUID) -> AgentRun: + """Get a run by ID.""" + run = await 
self.run_repo.get(run_id) + if not run: + raise NotFoundError("Agent run not found", code="AGENT_RUN_NOT_FOUND", data={"run_id": str(run_id)}) + return run diff --git a/backend/app/services/agent_service.py b/backend/app/services/agent_service.py new file mode 100644 index 000000000..bfd2e39e8 --- /dev/null +++ b/backend/app/services/agent_service.py @@ -0,0 +1,226 @@ +""" +AgentService — manages Agent lifecycle. +""" + +from __future__ import annotations + +import re +import uuid +from typing import List + +from loguru import logger +from sqlalchemy import delete, exists, select, update +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import NotFoundError, ResourceConflictError +from app.core.model.utils import encrypt_credentials +from app.models.agent import Agent, AgentRelease, AgentVersion +from app.models.agent_run import AgentRun +from app.models.execution import Artifact, Execution, ExecutionEvent +from app.models.task import Task +from app.models.thread import Thread +from app.repositories.agent import AgentRepository, AgentVersionRepository +from app.schemas.agent import CreateAgentRequest, UpdateAgentRequest + +from .base import BaseService + + +def _generate_slug(name: str) -> str: + slug = re.sub(r"[^a-zA-Z0-9\s-]", "", name.lower()) + slug = re.sub(r"[\s]+", "-", slug).strip("-") + return slug or "agent" + + +class AgentService(BaseService): + """Manages the Agent entity and its initial version.""" + + RESPONSE_RELATIONS = ["current_draft_version", "active_release"] + + def __init__(self, db: AsyncSession): + super().__init__(db) + self.agent_repo = AgentRepository(db) + self.version_repo = AgentVersionRepository(db) + + async def list_agents(self, workspace_id: uuid.UUID) -> List[Agent]: + return await self.agent_repo.list_by_workspace(workspace_id) + + async def get_agent(self, agent_id: uuid.UUID) -> Agent: + agent = await self.agent_repo.get( + agent_id, + relations=self.RESPONSE_RELATIONS, + ) + if not agent: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + return agent + + async def create_agent( + self, + workspace_id: uuid.UUID, + user_id: str, + data: CreateAgentRequest, + ) -> Agent: + base_slug = _generate_slug(data.name) + slug = base_slug + suffix = 1 + while await self.agent_repo.get_by_workspace_and_slug(workspace_id, slug): + suffix += 1 + slug = f"{base_slug}-{suffix}" + + # Create the Agent + create_data = { + "workspace_id": workspace_id, + "name": data.name, + "slug": slug, + "description": data.description, + "avatar": data.avatar, + "status": "draft", + "created_by": user_id, + } + if data.custom_env: + create_data["encrypted_custom_env"] = encrypt_credentials(data.custom_env) + + agent = await self.agent_repo.create(create_data) + + # Create an initial draft AgentVersion (v1) + version = await self.version_repo.create( + { + "agent_id": agent.id, + "version_number": 1, + "status": "draft", + "source_kind": "manual", + "engine_kind": data.engine_kind, + "definition_payload": data.definition_payload or {}, + "capability_manifest": data.capability_manifest or {}, + "created_by": user_id, + } + ) + + # Link the draft version + await self.agent_repo.update(agent.id, {"current_draft_version_id": version.id}) + + await self.commit() + reloaded = await self.agent_repo.get(agent.id, relations=self.RESPONSE_RELATIONS) + assert reloaded is not None + logger.info(f"Created agent {agent.id} ({data.name}) with initial version {version.id}") + return reloaded + + async def update_agent( 
+ self, + agent_id: uuid.UUID, + data: UpdateAgentRequest, + ) -> Agent: + agent = await self.agent_repo.get(agent_id) + if not agent: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + + update_data = data.model_dump(exclude_unset=True) + if not update_data: + return agent + + if "custom_env" in update_data: + raw = update_data.pop("custom_env") + if raw: + update_data["encrypted_custom_env"] = encrypt_credentials(raw) + else: + update_data["encrypted_custom_env"] = None + + updated = await self.agent_repo.update(agent_id, update_data) + assert updated is not None + await self.commit() + reloaded = await self.agent_repo.get(agent_id, relations=self.RESPONSE_RELATIONS) + assert reloaded is not None + return reloaded + + async def delete_agent(self, agent_id: uuid.UUID) -> None: + """Delete an agent and all dependent records. + + FK dependency chain: Agent → Versions → Releases → Runs → Executions → Events/Artifacts. + Self-referencing FKs (agent.current_draft_version_id, agent.active_release_id, + runs.current_execution_id, executions.parent_execution_id) must be nullified + before their targets are deleted. + """ + agent = await self.agent_repo.get(agent_id) + if not agent: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + + db = self.db + + has_tasks = (await db.execute(select(exists().where(Task.agent_id == agent_id)))).scalar() + if has_tasks: + raise ResourceConflictError( + "Cannot delete agent: tasks still reference it", + code="AGENT_DELETE_TASK_REFERENCE_CONFLICT", + data={"agent_id": str(agent_id)}, + ) + + version_ids = ( + (await db.execute(select(AgentVersion.id).where(AgentVersion.agent_id == agent_id))).scalars().all() + ) + + release_ids = ( + (await db.execute(select(AgentRelease.id).where(AgentRelease.agent_version_id.in_(version_ids)))) + .scalars() + .all() + if version_ids + else [] + ) + + release_run_ids = ( + (await db.execute(select(AgentRun.id).where(AgentRun.release_id.in_(release_ids)))).scalars().all() + if release_ids + else [] + ) + + draft_run_ids = ( + (await db.execute(select(AgentRun.id).where(AgentRun.agent_version_id.in_(version_ids)))).scalars().all() + if version_ids + else [] + ) + + run_ids = list(dict.fromkeys([*release_run_ids, *draft_run_ids])) + + exec_ids = ( + (await db.execute(select(Execution.id).where(Execution.run_id.in_(run_ids)))).scalars().all() + if run_ids + else [] + ) + + if exec_ids: + await db.execute(delete(ExecutionEvent).where(ExecutionEvent.execution_id.in_(exec_ids))) + await db.execute(delete(Artifact).where(Artifact.execution_id.in_(exec_ids))) + await db.execute( + update(Execution).where(Execution.parent_execution_id.in_(exec_ids)).values(parent_execution_id=None) + ) + + if run_ids: + await db.execute( + update(AgentRun).where(AgentRun.id.in_(run_ids)).values(current_execution_id=None, thread_id=None) + ) + + if exec_ids: + await db.execute(delete(Execution).where(Execution.id.in_(exec_ids))) + + if run_ids: + await db.execute(delete(AgentRun).where(AgentRun.id.in_(run_ids))) + + await db.execute(delete(Thread).where(Thread.agent_id == agent_id)) + + await db.execute( + update(Agent) + .where(Agent.id == agent_id) + .values( + current_draft_version_id=None, + active_release_id=None, + ) + ) + + if release_ids: + await db.execute(delete(AgentRelease).where(AgentRelease.id.in_(release_ids))) + + if version_ids: + await db.execute(delete(AgentVersion).where(AgentVersion.id.in_(version_ids))) + + await 
db.execute(delete(Agent).where(Agent.id == agent_id)) + + await self.commit() + logger.info(f"Deleted agent {agent_id} and all related records") diff --git a/backend/app/services/agent_version_service.py b/backend/app/services/agent_version_service.py new file mode 100644 index 000000000..8d78bafd6 --- /dev/null +++ b/backend/app/services/agent_version_service.py @@ -0,0 +1,151 @@ +""" +AgentVersionService — manages AgentVersion lifecycle. +""" + +from __future__ import annotations + +import uuid +from typing import List + +from loguru import logger +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import NotFoundError +from app.core.state_machines import VERSION_SM +from app.models.agent import AgentVersion +from app.repositories.agent import AgentRepository, AgentVersionRepository +from app.schemas.agent_version import CreateAgentVersionRequest, UpdateAgentVersionRequest + +from .base import BaseService + + +class AgentVersionService(BaseService): + """Manages AgentVersion entities.""" + + def __init__(self, db: AsyncSession): + super().__init__(db) + self.version_repo = AgentVersionRepository(db) + self.agent_repo = AgentRepository(db) + + async def list_versions(self, agent_id: uuid.UUID) -> List[AgentVersion]: + return await self.version_repo.list_by_agent(agent_id) + + async def get_version(self, version_id: uuid.UUID) -> AgentVersion: + version = await self.version_repo.get(version_id) + if not version: + raise NotFoundError( + "Agent version not found", + code="AGENT_VERSION_NOT_FOUND", + data={"version_id": str(version_id)}, + ) + return version + + async def create_version( + self, + agent_id: uuid.UUID, + user_id: str, + data: CreateAgentVersionRequest, + ) -> AgentVersion: + # Auto-increment version number + max_num = await self.version_repo.get_max_version_number(agent_id) + next_num = max_num + 1 + + version = await self.version_repo.create( + { + "agent_id": agent_id, + "version_number": next_num, + "status": "draft", + "source_kind": data.source_kind or "manual", + "engine_kind": data.engine_kind, + "definition_payload": data.definition_payload or {}, + "capability_manifest": data.capability_manifest or {}, + "changelog": data.changelog, + "created_by": user_id, + } + ) + + # Update agent's current_draft_version_id + await self.agent_repo.update(agent_id, {"current_draft_version_id": version.id}) + + await self.commit() + logger.info(f"Created version {version.id} (v{next_num}) for agent {agent_id}") + return version + + async def update_version( + self, + version_id: uuid.UUID, + data: UpdateAgentVersionRequest, + user_id: str, + ) -> AgentVersion: + version = await self.version_repo.get(version_id) + if not version: + raise NotFoundError( + "Agent version not found", + code="AGENT_VERSION_NOT_FOUND", + data={"version_id": str(version_id)}, + ) + + update_data = data.model_dump(exclude_unset=True) + + # Frozen versions are immutable. If the client still holds a stale + # frozen version_id (race with publish), fork a new draft with the + # update already applied. 
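+        # Example of the race: the editor loads draft v3, a publish freezes
+        # v3 and forks draft v4, then the editor PATCHes v3. Rather than
+        # rejecting the stale write, we fork a new draft from v3 with the
+        # patch applied and repoint current_draft_version_id at it.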
+ if version.status == "frozen": + forked = await self.fork_draft_from(version, user_id, overrides=update_data) + await self.commit() + return forked + + if not update_data: + return version + + updated = await self.version_repo.update(version_id, update_data) + assert updated is not None + await self.commit() + return updated + + async def fork_draft_from( + self, + source: AgentVersion, + user_id: str, + overrides: dict | None = None, + ) -> AgentVersion: + """Create a new draft version copying `source`'s payload, optionally + overlaying `overrides`, and point the agent's current_draft_version_id + at it. Does not commit — caller owns the transaction boundary. + """ + max_num = await self.version_repo.get_max_version_number(source.agent_id) + create_data = { + "agent_id": source.agent_id, + "version_number": max_num + 1, + "status": "draft", + "source_kind": "fork", + "engine_kind": source.engine_kind, + "definition_payload": dict(source.definition_payload or {}), + "capability_manifest": dict(source.capability_manifest or {}), + "changelog": None, + "created_by": user_id, + } + if overrides: + create_data.update(overrides) + new_version = await self.version_repo.create(create_data) + await self.agent_repo.update(source.agent_id, {"current_draft_version_id": new_version.id}) + logger.info( + f"Forked draft version {new_version.id} (v{max_num + 1}) " + f"from {source.id} (v{source.version_number}) for agent {source.agent_id}" + ) + return new_version + + async def freeze_version(self, version_id: uuid.UUID) -> AgentVersion: + version = await self.version_repo.get(version_id) + if not version: + raise NotFoundError( + "Agent version not found", + code="AGENT_VERSION_NOT_FOUND", + data={"version_id": str(version_id)}, + ) + + VERSION_SM.validate(version.status, "frozen") + updated = await self.version_repo.update(version_id, {"status": "frozen"}) + assert updated is not None + logger.info(f"Froze version {version_id}") + return updated diff --git a/backend/app/services/auth_service.py b/backend/app/services/auth_service.py index f73b83d08..968e904c7 100644 --- a/backend/app/services/auth_service.py +++ b/backend/app/services/auth_service.py @@ -7,7 +7,13 @@ from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import BadRequestException, UnauthorizedException +from app.common.app_errors import ( + AccessDeniedError, + AuthenticationError, + InternalServiceError, + InvalidRequestError, + ServiceUnavailableError, +) from app.core.security import ( generate_email_verify_token, generate_password_reset_token, @@ -182,10 +188,14 @@ async def register( JWT login response dict containing user info and tokens. Raises: - BadRequestException: If the email is already registered. + InvalidRequestError: If the email is already registered. """ if await self.user_repo.get_by_email(email): - raise BadRequestException("Email already registered") + raise InvalidRequestError( + "Email already registered", + code="USER_ALREADY_EXISTS", + data={"email": email}, + ) user = await self.user_repo.create( { @@ -248,33 +258,33 @@ async def login( JWT login response dict containing user info and tokens. Raises: - UnauthorizedException: If credentials are invalid, the account is + AuthenticationError: If credentials are invalid, the account is inactive, or email verification is required but not completed. 
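+
+        Note:
+            ``password`` is expected as a client-side SHA-256 hex digest
+            (64 lowercase hex characters); any other shape is rejected as
+            invalid credentials.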
""" user = await self.user_repo.get_by_email(email) if not user: - raise UnauthorizedException("Incorrect email or password") + raise AuthenticationError("Incorrect email or password", code="INVALID_CREDENTIALS") login_success = False if not skip_password_check: if not user.hashed_password: - raise UnauthorizedException("Incorrect email or password") + raise AuthenticationError("Incorrect email or password", code="INVALID_CREDENTIALS") if not password: - raise UnauthorizedException("Incorrect email or password") + raise AuthenticationError("Incorrect email or password", code="MISSING_CREDENTIALS") # Validate password format (client-side hashed password) password = password.strip().lower() if len(password) != 64 or not all(c in "0123456789abcdef" for c in password): # Log the specific error internally without exposing to user logger.warning(f"Invalid password format received for login attempt: email={email}") - raise UnauthorizedException("Incorrect email or password") + raise AuthenticationError("Incorrect email or password", code="INVALID_CREDENTIALS") stored_password = user.hashed_password.strip().lower() if len(stored_password) != 64 or not all(c in "0123456789abcdef" for c in stored_password): # Log the internal error but don't expose to user logger.error(f"Invalid stored password format for user: {user.id}") - raise UnauthorizedException("Incorrect email or password") + raise AuthenticationError("Incorrect email or password", code="INVALID_CREDENTIALS") password_match = verify_password(password, stored_password) @@ -294,15 +304,19 @@ async def login( logger.debug("Failed to log login failure audit event", exc_info=True) await self.commit() - raise UnauthorizedException("Incorrect email or password") + raise AuthenticationError("Incorrect email or password", code="INVALID_CREDENTIALS") else: login_success = True if not user.is_active: - raise UnauthorizedException("Inactive user") + raise AuthenticationError("Inactive user", code="USER_INACTIVE") if settings.require_email_verification and not user.email_verified: - raise UnauthorizedException("Email not verified. Please verify your email before logging in.", code=403) + raise AccessDeniedError( + "Email not verified. Please verify your email before logging in.", + code="EMAIL_NOT_VERIFIED", + data={"user_id": user.id, "email": user.email}, + ) if login_success: from app.services.login_init import run_post_login_init @@ -351,13 +365,13 @@ async def reset_password(self, token: str, new_password: str) -> bool: True on success. Raises: - BadRequestException: If the token is invalid or expired. + InvalidRequestError: If the token is invalid or expired. 
""" user = await self.user_repo.get_by_reset_token(token) if not user: - raise BadRequestException("Invalid or expired reset token") + raise InvalidRequestError("Invalid or expired reset token", code="RESET_TOKEN_INVALID") if user.password_reset_expires and user.password_reset_expires < datetime.now(timezone.utc): - raise BadRequestException("Reset token has expired") + raise InvalidRequestError("Reset token has expired", code="RESET_TOKEN_EXPIRED") user.hashed_password = get_password_hash(new_password) user.password_reset_token = None user.password_reset_expires = None @@ -367,7 +381,7 @@ async def reset_password(self, token: str, new_password: str) -> bool: async def reset_password_for_current_user(self, user: AuthUser, new_password: str) -> bool: """Reset password for the current logged-in user (no old password required).""" if not user or not user.is_active: - raise BadRequestException("User not found or inactive") + raise InvalidRequestError("User not found or inactive", code="USER_INVALID") user.hashed_password = get_password_hash(new_password) await self.commit() return True @@ -383,13 +397,13 @@ async def verify_email(self, token: str) -> bool: True on success. Raises: - BadRequestException: If the token is invalid or expired. + InvalidRequestError: If the token is invalid or expired. """ user = await self.user_repo.get_by_verify_token(token) if not user: - raise BadRequestException("Invalid or expired verification token") + raise InvalidRequestError("Invalid or expired verification token", code="VERIFICATION_TOKEN_INVALID") if user.email_verify_expires and user.email_verify_expires < datetime.now(timezone.utc): - raise BadRequestException("Verification token has expired") + raise InvalidRequestError("Verification token has expired", code="VERIFICATION_TOKEN_EXPIRED") user.email_verified = True user.email_verify_token = None user.email_verify_expires = None @@ -406,10 +420,10 @@ async def resend_verification_email(self, user: AuthUser) -> bool: True on success. Raises: - BadRequestException: If the email is already verified. + InvalidRequestError: If the email is already verified. """ if user.email_verified: - raise BadRequestException("Email already verified") + raise InvalidRequestError("Email already verified", code="EMAIL_ALREADY_VERIFIED") token, expires = generate_email_verify_token() user.email_verify_token = token user.email_verify_expires = expires @@ -427,25 +441,31 @@ async def refresh_token(self, refresh_token: str) -> dict: from app.core.redis import RedisClient if not RedisClient.is_available(): - raise UnauthorizedException("Token refresh service unavailable. Please login again.", code=503) + raise ServiceUnavailableError( + "Token refresh service unavailable. Please login again.", + code="TOKEN_REFRESH_UNAVAILABLE", + ) redis_client = RedisClient.get_client() if not redis_client: - raise UnauthorizedException("Token refresh service unavailable. Please login again.", code=503) + raise ServiceUnavailableError( + "Token refresh service unavailable. 
Please login again.", + code="TOKEN_REFRESH_UNAVAILABLE", + ) try: refresh_token_key = f"refresh_token:{refresh_token}" user_id = await redis_client.get(refresh_token_key) if not user_id: - raise UnauthorizedException("Invalid or expired refresh token") + raise AuthenticationError("Invalid or expired refresh token", code="REFRESH_TOKEN_INVALID") # user_id from redis is a string, but AuthUser.id is also string # Use get_by method with id parameter user = await self.user_repo.get_by(id=user_id) # type: ignore[arg-type] if not user or not user.is_active: await self._delete_refresh_token(refresh_token, user_id) - raise UnauthorizedException("Invalid user") + raise AuthenticationError("Invalid user", code="USER_INVALID") access_token, new_refresh_token, csrf_token, access_expires, refresh_expires = await self._issue_jwt_tokens( user.id @@ -456,10 +476,13 @@ async def refresh_token(self, refresh_token: str) -> dict: return self._build_jwt_login_response( user, access_token, new_refresh_token, csrf_token, access_expires, refresh_expires ) - except UnauthorizedException: + except AuthenticationError: raise except Exception: - raise UnauthorizedException("Token refresh failed. Please login again.", code=500) + raise InternalServiceError( + "Token refresh failed. Please login again.", + code="TOKEN_REFRESH_FAILED", + ) # ---------------------------------------------------------------- misc async def get_user_by_id(self, user_id: str) -> Optional[AuthUser]: diff --git a/backend/app/services/base.py b/backend/app/services/base.py index 04b7eb3c0..e60723364 100644 --- a/backend/app/services/base.py +++ b/backend/app/services/base.py @@ -4,6 +4,7 @@ from typing import Generic, TypeVar +from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession T = TypeVar("T") @@ -26,3 +27,12 @@ async def commit(self): async def rollback(self): """Roll back the transaction.""" await self.db.rollback() + + async def safe_commit(self): + """Commit with automatic rollback on failure.""" + try: + await self.db.commit() + except Exception: + logger.warning("DB commit failed, rolling back", exc_info=True) + await self.db.rollback() + raise diff --git a/backend/app/services/copilot_service.py b/backend/app/services/copilot_service.py index f43a5d5de..e6c5861c1 100644 --- a/backend/app/services/copilot_service.py +++ b/backend/app/services/copilot_service.py @@ -10,7 +10,7 @@ from loguru import logger -from app.common.exceptions import ModelConfigError +from app.common.app_errors import ModelConfigError from app.core.copilot.action_applier import apply_actions_to_graph_state from app.core.copilot.action_types import ( CopilotResponse, @@ -37,8 +37,6 @@ ) from app.core.copilot.tool_output_parser import parse_tool_output from app.core.copilot.tools import reset_node_registry -from app.repositories.auth_user import AuthUserRepository -from app.services.graph_service import GraphService class CopilotService: @@ -76,6 +74,12 @@ async def _resolve_model(self) -> Any: Uses the same ModelResolver that graph execution uses, ensuring consistent credential handling across the system. """ + if not self.provider_name or not self.model_name: + raise ModelConfigError( + ModelConfigError.BUILD_COPILOT_MODEL_REQUIRED, + "Build Copilot has no model configured. 
Select a model and try again.", + ) + from app.core.graph.deep_agents.model_resolver import ModelResolver from app.services.model_service import ModelService @@ -84,7 +88,7 @@ async def _resolve_model(self) -> Any: resolver = ModelResolver(model_service, user_id=self.user_id) return await resolver.resolve(model_name=self.model_name, provider_name=self.provider_name) - async def _get_copilot_stream( + async def get_copilot_stream( self, prompt: str, graph_context: Dict[str, Any], @@ -125,7 +129,13 @@ async def _get_copilot_stream( ) except Exception as e: logger.error(f"[CopilotService] Agent creation error: {e}") - yield {"type": "error", "message": f"Failed to create Copilot agent: {str(e)}", "code": "AGENT_ERROR"} + yield { + "type": "error", + "message": f"Failed to create Copilot agent: {str(e)}", + "code": "AGENT_ERROR", + "source": "runtime", + "retryable": False, + } return messages = self._build_messages(prompt, conversation_history) @@ -336,11 +346,11 @@ async def generate_actions_stream( ) -> AsyncGenerator[Dict[str, Any], None]: """ Generate graph actions with streaming (SSE events). - Consumes the unified _get_copilot_stream and yields events; handles top-level errors with code. + Consumes the unified get_copilot_stream and yields events; handles top-level errors with code. """ logger.info(f"[CopilotService] generate_actions_stream start user_id={self.user_id}") try: - async for event in self._get_copilot_stream( + async for event in self.get_copilot_stream( prompt=prompt, graph_context=graph_context, conversation_history=conversation_history, @@ -349,21 +359,34 @@ async def generate_actions_stream( ): yield event except ModelConfigError as e: + yield {"type": "error", **e.to_payload()} + except KeyboardInterrupt: + logger.warning("[CopilotService] Stream interrupted by user") yield { "type": "error", - "message": str(e.detail), - "code": e.error_code, - "params": e.params, + "message": "Request cancelled by user", + "code": "CANCELLED", + "source": "api", + "retryable": False, } - except KeyboardInterrupt: - logger.warning("[CopilotService] Stream interrupted by user") - yield {"type": "error", "message": "Request cancelled by user", "code": "CANCELLED"} except (CopilotLLMError, CopilotAgentError) as e: logger.error(f"[CopilotService] Stream failed: {e}") - yield {"type": "error", "message": str(e), "code": type(e).__name__} + yield { + "type": "error", + "message": str(e), + "code": type(e).__name__, + "source": "runtime", + "retryable": False, + } except Exception as e: logger.exception(f"[CopilotService] generate_actions_stream failed: {e}") - yield {"type": "error", "message": f"An unexpected error occurred: {str(e)}", "code": "UNKNOWN_ERROR"} + yield { + "type": "error", + "message": f"An unexpected error occurred: {str(e)}", + "code": "UNKNOWN_ERROR", + "source": "internal", + "retryable": False, + } def _handle_chat_model_stream_event( self, @@ -528,55 +551,56 @@ def _extract_final_message(self, result: Dict[str, Any]) -> str: return "" async def _persist_graph_from_actions(self, graph_id: str, final_actions: List[Dict[str, Any]]) -> bool: - """Apply actions to graph state and persist in a dedicated transaction. Returns True if saved successfully.""" - from app.core.database import async_session_factory + """Apply actions to graph state and persist via AgentVersion.definition_payload. 
- async with async_session_factory() as new_db2: - try: - current_user = None - if self.user_id: - user_repo = AuthUserRepository(new_db2) - current_user = await user_repo.get_by(id=self.user_id) - - graph_service = GraphService(new_db2) - graph_uuid = uuid_lib.UUID(graph_id) - current_state = await graph_service.load_graph_state( - graph_id=graph_uuid, - current_user=current_user, - ) + Resolves the agent from graph_id (which is an agent_id in the copilot + context), reads the current draft version's definition_payload, applies + the copilot actions to nodes/edges, and writes the result back. + """ + from sqlalchemy import select - current_nodes = current_state.get("nodes", []) - current_edges = current_state.get("edges", []) + from app.models.agent import Agent, AgentVersion - updated_nodes, updated_edges = apply_actions_to_graph_state( - current_nodes=current_nodes, - current_edges=current_edges, - actions=final_actions, - ) + if not self.db: + logger.warning("[CopilotService] _persist_graph_from_actions: no db session, skipping") + return False - viewport = current_state.get("viewport") - variables = current_state.get("variables") + try: + agent = ( + await self.db.execute(select(Agent).where(Agent.id == uuid_lib.UUID(graph_id))) + ).scalar_one_or_none() - await graph_service.save_graph_state( - graph_id=graph_uuid, - nodes=updated_nodes, - edges=updated_edges, - viewport=viewport, - variables=variables, - current_user=current_user, - ) + if not agent: + logger.warning(f"[CopilotService] _persist_graph_from_actions: agent not found graph_id={graph_id}") + return False - await new_db2.commit() - logger.info( - f"[CopilotService] Async task saved graph state for graph_id={graph_id}, " - f"nodes={len(updated_nodes)}, edges={len(updated_edges)}" - ) - return True - except Exception as e: - if new_db2.in_transaction(): - await new_db2.rollback() - logger.error( - f"[CopilotService] Failed to save graph state for graph_id={graph_id}: {e}", - exc_info=True, - ) + if not agent.current_draft_version_id: + logger.warning(f"[CopilotService] _persist_graph_from_actions: no draft version for agent={graph_id}") return False + + version = ( + await self.db.execute(select(AgentVersion).where(AgentVersion.id == agent.current_draft_version_id)) + ).scalar_one() + + payload = dict(version.definition_payload or {}) + current_nodes: List[Dict[str, Any]] = payload.get("nodes", []) + current_edges: List[Dict[str, Any]] = payload.get("edges", []) + + updated_nodes, updated_edges = apply_actions_to_graph_state(current_nodes, current_edges, final_actions) + + payload["nodes"] = updated_nodes + payload["edges"] = updated_edges + version.definition_payload = payload + + await self.db.commit() + logger.info( + f"[CopilotService] _persist_graph_from_actions ok: " + f"agent={graph_id} actions={len(final_actions)} " + f"nodes={len(updated_nodes)} edges={len(updated_edges)}" + ) + return True + + except Exception as e: + logger.error(f"[CopilotService] _persist_graph_from_actions failed: {e}", exc_info=True) + await self.db.rollback() + return False diff --git a/backend/app/services/custom_tool_service.py b/backend/app/services/custom_tool_service.py index aa8c4c506..6a6a2901e 100644 --- a/backend/app/services/custom_tool_service.py +++ b/backend/app/services/custom_tool_service.py @@ -7,7 +7,7 @@ import uuid from typing import Dict, List, Optional -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException +from app.common.app_errors import AccessDeniedError, InvalidRequestError, 
NotFoundError from app.models.custom_tool import CustomTool from app.repositories.custom_tool import CustomToolRepository @@ -37,12 +37,20 @@ async def create_tool( """Create a tool.""" current_count = await self.repo.count_by_user(owner_id) if current_count >= MAX_TOOLS_PER_USER: - raise BadRequestException("User custom tool quota exceeded") + raise InvalidRequestError( + "User custom tool quota exceeded", + code="CUSTOM_TOOL_QUOTA_EXCEEDED", + data={"limit": MAX_TOOLS_PER_USER}, + ) # check if a tool with the same name exists existing = await self.repo.get_by(owner_id=owner_id, name=name) if existing: - raise BadRequestException("Tool name already exists for this user") + raise InvalidRequestError( + "Tool name already exists for this user", + code="CUSTOM_TOOL_NAME_ALREADY_EXISTS", + data={"name": name}, + ) tool = CustomTool( owner_id=owner_id, @@ -71,16 +79,24 @@ async def update_tool( """Update a tool.""" tool = await self.repo.get(tool_id) if not tool: - raise NotFoundException("Custom tool not found") + raise NotFoundError("Custom tool not found", code="CUSTOM_TOOL_NOT_FOUND", data={"tool_id": str(tool_id)}) # verify ownership if tool.owner_id != current_user_id: - raise ForbiddenException("You can only update your own tools") + raise AccessDeniedError( + "You can only update your own tools", + code="CUSTOM_TOOL_UPDATE_FORBIDDEN", + data={"tool_id": str(tool_id)}, + ) if name and name != tool.name: existing = await self.repo.get_by(owner_id=current_user_id, name=name) if existing: - raise BadRequestException("Tool name already exists for this user") + raise InvalidRequestError( + "Tool name already exists for this user", + code="CUSTOM_TOOL_NAME_ALREADY_EXISTS", + data={"name": name}, + ) tool.name = name if code is not None: tool.code = code @@ -99,11 +115,15 @@ async def delete_tool(self, tool_id: uuid.UUID, current_user_id: str) -> None: """Delete a tool.""" tool = await self.repo.get(tool_id) if not tool: - raise NotFoundException("Custom tool not found") + raise NotFoundError("Custom tool not found", code="CUSTOM_TOOL_NOT_FOUND", data={"tool_id": str(tool_id)}) # verify ownership if tool.owner_id != current_user_id: - raise ForbiddenException("You can only delete your own tools") + raise AccessDeniedError( + "You can only delete your own tools", + code="CUSTOM_TOOL_DELETE_FORBIDDEN", + data={"tool_id": str(tool_id)}, + ) await self.repo.delete_by_id(tool_id) await self.db.commit() diff --git a/backend/app/services/dispatch_service.py b/backend/app/services/dispatch_service.py new file mode 100644 index 000000000..3045a0dd2 --- /dev/null +++ b/backend/app/services/dispatch_service.py @@ -0,0 +1,125 @@ +""" +DispatchService — API layer's single entry point for execution dispatch. + +Wraps ExecutionOrchestrator so that API routes never import from core/engine/. 
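+
+Illustrative usage from an API route (a sketch — ``db``, ``task_id`` and
+``current_user`` are assumed to come from the route's dependencies and are
+not part of this module):
+
+    service = DispatchService(db)
+    run = await service.dispatch_task(task_id, user_id=current_user.id)
+    # run is an AgentRun; the engine itself is fired in the background.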
+""" + +from __future__ import annotations + +import uuid + +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.agent_run import AgentRun +from app.services.execution_orchestrator import ExecutionOrchestrator + + +class DispatchService: + def __init__(self, db: AsyncSession): + self._orchestrator = ExecutionOrchestrator(db) + + async def dispatch_task( + self, + task_id: uuid.UUID, + user_id: str, + prompt_override: str | None = None, + ) -> AgentRun: + return await self._orchestrator.dispatch_task(task_id, user_id, prompt_override) + + async def dispatch_chat( + self, + thread_id: uuid.UUID, + message: str, + user_id: str, + ) -> AgentRun: + return await self._orchestrator.dispatch_chat(thread_id, message, user_id) + + async def dispatch_direct( + self, + release_id: uuid.UUID, + prompt: str, + user_id: str, + trigger_medium: str = "api", + run_purpose: str = "production", + thread_id: uuid.UUID | None = None, + task_id: uuid.UUID | None = None, + input_payload: dict | None = None, + ) -> AgentRun: + return await self._orchestrator.dispatch_direct( + release_id, + prompt, + user_id, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + thread_id=thread_id, + task_id=task_id, + input_payload=input_payload, + ) + + async def dispatch_draft( + self, + agent_id: uuid.UUID, + version_id: uuid.UUID, + prompt: str, + user_id: str, + workspace_id: uuid.UUID, + input_payload: dict | None = None, + ) -> AgentRun: + return await self._orchestrator.dispatch_draft( + agent_id, + version_id, + prompt, + user_id, + workspace_id, + input_payload=input_payload, + ) + + async def dispatch_copilot_draft( + self, + agent_id: uuid.UUID, + version_id: uuid.UUID, + workspace_id: uuid.UUID, + prompt: str, + user_id: str, + graph_context: dict, + conversation_history: list | None = None, + mode: str = "deepagents", + provider_name: str | None = None, + model_name: str | None = None, + ) -> AgentRun: + return await self._orchestrator.dispatch_copilot_draft( + agent_id=agent_id, + version_id=version_id, + workspace_id=workspace_id, + prompt=prompt, + user_id=user_id, + graph_context=graph_context, + conversation_history=conversation_history, + mode=mode, + provider_name=provider_name, + model_name=model_name, + ) + + async def cancel_run(self, run_id: uuid.UUID) -> AgentRun: + return await self._orchestrator.cancel_run(run_id) + + async def retry_run(self, run_id: uuid.UUID, user_id: str) -> AgentRun: + return await self._orchestrator.retry_run(run_id, user_id) + + async def send_message(self, execution_id: uuid.UUID, message: str) -> None: + return await self._orchestrator.send_message(execution_id, message) + + async def emit_user_message( + self, + *, + run: AgentRun, + execution_id: uuid.UUID, + message: str, + attachments: list[dict] | None = None, + ) -> None: + return await self._orchestrator.emit_user_message( + run=run, + execution_id=execution_id, + message=message, + attachments=attachments, + ) diff --git a/backend/app/services/execution_event_adapter.py b/backend/app/services/execution_event_adapter.py new file mode 100644 index 000000000..a393c3967 --- /dev/null +++ b/backend/app/services/execution_event_adapter.py @@ -0,0 +1,196 @@ +""" +ExecutionEventAdapter — implements ExecutionEventPort. + +Bridges core/ execution runners to the event bus without core/ importing services/. 
+""" + +from __future__ import annotations + +import uuid +from typing import Any, Mapping, Optional, cast + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InternalServiceError +from app.core.events import ExecutionEventEnvelope, execution_event_bus +from app.core.events.event_types import ExecutionEventType +from app.core.ports.execution import EventContext +from app.models.execution import Execution, ExecutionEvent + + +class ExecutionEventAdapter: + """Implements ExecutionEventPort — publishes execution events through the bus.""" + + def __init__(self, db: AsyncSession): + self.db = db + self._event_ctx: Optional[EventContext] = None + + def set_event_context(self, ctx: EventContext) -> None: + self._event_ctx = ctx + + async def mark_status( + self, + *, + execution_id: uuid.UUID, + status: str, + container_id: Optional[str] = None, + session_id: Optional[str] = None, + error: Mapping[str, Any] | None = None, + result_summary: Optional[dict[str, Any]] = None, + ) -> Optional[Execution]: + ctx = self._event_ctx + if ctx is None: + from app.models.agent_run import AgentRun + + result = await self.db.execute( + select(Execution, AgentRun.workspace_id) + .join(AgentRun, Execution.run_id == AgentRun.id) + .where(Execution.id == execution_id) + ) + row = result.one_or_none() + if not row: + return None + execution, ws_id = row + ctx = EventContext( + run_id=execution.run_id, + workspace_id=ws_id, + ) + else: + execution = None + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=ctx.run_id, + workspace_id=ctx.workspace_id, + event_type=ExecutionEventType.EXECUTION_STATUS_CHANGE, + payload={"status": status}, + trigger_medium=ctx.trigger_medium if ctx else None, + run_purpose=ctx.run_purpose if ctx else None, + thread_id=ctx.thread_id if ctx else None, + task_id=ctx.task_id if ctx else None, + target_status=status, + error=dict(error) if error is not None else None, + container_id=container_id or session_id, + metrics=result_summary, + ) + await execution_event_bus.publish(envelope, self.db) + + if execution is None: + execution = cast( + Optional[Execution], + (await self.db.execute(select(Execution).where(Execution.id == execution_id))).scalar_one_or_none(), + ) + else: + await self.db.refresh(execution) + return cast(Optional[Execution], execution) + + async def append_event( + self, + *, + execution_id: uuid.UUID, + event_type: ExecutionEventType, + payload: dict[str, Any], + ) -> ExecutionEvent: + if self._event_ctx is None: + raise InternalServiceError( + "Execution event context is not initialized", + code="EXECUTION_EVENT_CONTEXT_MISSING", + data={"execution_id": str(execution_id)}, + ) + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=self._event_ctx.run_id, + workspace_id=self._event_ctx.workspace_id, + event_type=event_type, + payload=payload, + trigger_medium=self._event_ctx.trigger_medium, + run_purpose=self._event_ctx.run_purpose, + thread_id=self._event_ctx.thread_id, + task_id=self._event_ctx.task_id, + ) + await execution_event_bus.publish(envelope, self.db) + + return ExecutionEvent( + execution_id=execution_id, + sequence_no=envelope.seq, + event_type=event_type, + payload=payload, + ) + + async def batch_append_events( + self, + *, + execution_id: uuid.UUID, + events: list[dict[str, Any]], + ) -> list[ExecutionEvent]: + if self._event_ctx is None: + raise InternalServiceError( + "Execution event context is not initialized", + 
code="EXECUTION_EVENT_CONTEXT_MISSING", + data={"execution_id": str(execution_id)}, + ) + + envelopes = [ + ExecutionEventEnvelope( + execution_id=execution_id, + run_id=self._event_ctx.run_id, + workspace_id=self._event_ctx.workspace_id, + event_type=evt["event_type"], + payload=evt["payload"], + trigger_medium=self._event_ctx.trigger_medium, + run_purpose=self._event_ctx.run_purpose, + thread_id=self._event_ctx.thread_id, + task_id=self._event_ctx.task_id, + ) + for evt in events + ] + await execution_event_bus.publish_batch(envelopes, self.db) + + return [ + ExecutionEvent( + execution_id=execution_id, + sequence_no=env.seq, + event_type=env.event_type, + payload=env.payload, + ) + for env in envelopes + ] + + async def complete_execution( + self, + *, + execution_id: uuid.UUID, + terminal_status: str, + result_summary: Optional[dict[str, Any]] = None, + error: Mapping[str, Any] | None = None, + session_id: Optional[str] = None, + ) -> None: + if self._event_ctx is None: + raise InternalServiceError( + "Execution event context is not initialized", + code="EXECUTION_EVENT_CONTEXT_MISSING", + data={"execution_id": str(execution_id)}, + ) + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=self._event_ctx.run_id, + workspace_id=self._event_ctx.workspace_id, + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={ + "status": terminal_status, + "error": dict(error) if error is not None else None, + "result_summary": result_summary, + }, + terminal_status=terminal_status, + error=dict(error) if error is not None else None, + container_id=session_id, + metrics=result_summary, + trigger_medium=self._event_ctx.trigger_medium, + run_purpose=self._event_ctx.run_purpose, + thread_id=self._event_ctx.thread_id, + task_id=self._event_ctx.task_id, + ) + await execution_event_bus.publish(envelope, self.db) diff --git a/backend/app/services/execution_orchestrator.py b/backend/app/services/execution_orchestrator.py new file mode 100644 index 000000000..bb28da634 --- /dev/null +++ b/backend/app/services/execution_orchestrator.py @@ -0,0 +1,986 @@ +""" +Execution Orchestrator — service-layer entry point for execution dispatch. + +Layer 2: sits between API/triggers (Layer 1) and engines (Layer 3). +Creates AgentRun + Execution, resolves the engine, builds context, and starts execution. +""" + +from __future__ import annotations + +import uuid +from typing import Any + +from loguru import logger +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import AppError, InvalidRequestError, NotFoundError, normalize_app_error +from app.core.engine.protocol import ExecutionContext +from app.core.engine.registry import engine_registry +from app.core.events import ExecutionEventEnvelope, execution_event_bus +from app.core.events.event_types import ExecutionEventType +from app.core.state_machines.transitions import transition_task +from app.models.agent import Agent, AgentRelease, AgentVersion +from app.models.agent_run import AgentRun +from app.models.execution import Execution +from app.models.task import Task +from app.models.thread import Thread +from app.utils.credentials import build_agent_credentials +from app.utils.safe_task import safe_create_task + + +class ExecutionOrchestrator: + """ + Unified dispatch: trigger → Run → Engine → Events. + + All entry points (Task dispatch, Chat, API, Scheduler) go through here. 
+ """ + + def __init__(self, db: AsyncSession): + self.db = db + + # ------------------------------------------------------------------ + # Public dispatch methods (Layer 1 calls these) + # ------------------------------------------------------------------ + + # Statuses from which a task may be dispatched (besides in_progress for re-fire). + DISPATCHABLE_STATUSES = {"backlog", "todo", "in_review"} + + async def dispatch_task( + self, + task_id: uuid.UUID, + user_id: str, + prompt_override: str | None = None, + ) -> AgentRun: + """Dispatch a Task → creates Run + Execution, fires engine.""" + task = await self._get_task(task_id) + if task.status == "in_progress" and task.latest_run_id: + raise InvalidRequestError( + "Task already has an active run. Cancel it first.", + code="TASK_RUN_ALREADY_ACTIVE", + data={"task_id": str(task_id), "run_id": str(task.latest_run_id)}, + ) + if task.status not in self.DISPATCHABLE_STATUSES and task.status != "in_progress": + raise InvalidRequestError( + f"Cannot dispatch task in '{task.status}' status. Move the task back to backlog first.", + code="TASK_STATUS_NOT_DISPATCHABLE", + data={"task_id": str(task_id), "status": task.status}, + ) + if not task.agent_id: + raise InvalidRequestError( + "Task has no assigned agent", + code="TASK_AGENT_MISSING", + data={"task_id": str(task_id)}, + ) + + agent = await self._get_agent(task.agent_id) + if not agent.active_release_id: + raise InvalidRequestError( + f"Agent '{agent.name}' has no active release", + code="AGENT_ACTIVE_RELEASE_MISSING", + data={"agent_id": str(agent.id), "agent_name": agent.name}, + ) + + prompt = prompt_override or task.goal or task.title + run = await self._create_and_fire( + agent=agent, + release_id=agent.active_release_id, + workspace_id=agent.workspace_id, + prompt=prompt, + trigger_medium="system", + run_purpose="production", + task_id=task_id, + user_id=user_id, + ) + + # Update task status + await transition_task(task, "in_progress", self.db, latest_run_id=run.id) + await self.db.commit() + + return run + + async def dispatch_chat( + self, + thread_id: uuid.UUID, + message: str, + user_id: str, + ) -> AgentRun: + """Dispatch from a Thread conversation → creates Run + Execution.""" + thread = (await self.db.execute(select(Thread).where(Thread.id == thread_id))).scalar_one_or_none() + if not thread: + raise NotFoundError("Thread not found", code="THREAD_NOT_FOUND", data={"thread_id": str(thread_id)}) + + agent = await self._get_agent(thread.agent_id) + if not agent.active_release_id: + raise InvalidRequestError( + f"Agent '{agent.name}' has no active release", + code="AGENT_ACTIVE_RELEASE_MISSING", + data={"agent_id": str(agent.id), "agent_name": agent.name}, + ) + + return await self._create_and_fire( + agent=agent, + release_id=agent.active_release_id, + workspace_id=agent.workspace_id, + prompt=message, + trigger_medium="api", + run_purpose="production", + thread_id=thread_id, + user_id=user_id, + ) + + async def dispatch_direct( + self, + release_id: uuid.UUID, + prompt: str, + user_id: str, + trigger_medium: str = "api", + run_purpose: str = "production", + thread_id: uuid.UUID | None = None, + task_id: uuid.UUID | None = None, + input_payload: dict | None = None, + ) -> AgentRun: + """Direct dispatch with explicit release_id (API / Scheduler).""" + release = await self._get_release(release_id) + version = await self._get_version(release.agent_version_id) + agent = await self._get_agent(version.agent_id) + + return await self._create_and_fire( + agent=agent, + release_id=release_id, 
+ workspace_id=agent.workspace_id, + prompt=prompt, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + thread_id=thread_id, + task_id=task_id, + user_id=user_id, + input_payload=input_payload, + ) + + async def dispatch_draft( + self, + agent_id: uuid.UUID, + version_id: uuid.UUID, + prompt: str, + user_id: str, + workspace_id: uuid.UUID, + input_payload: dict | None = None, + ) -> AgentRun: + """Dispatch a Test Lab run against a draft AgentVersion.""" + version = await self._get_version(version_id) + if version.agent_id != agent_id: + raise InvalidRequestError( + "Version does not belong to this agent", + code="AGENT_VERSION_AGENT_MISMATCH", + data={"agent_id": str(agent_id), "version_id": str(version_id)}, + ) + + agent = await self._get_agent(agent_id) + if agent.workspace_id != workspace_id: + raise InvalidRequestError( + "Agent does not belong to this workspace", + code="AGENT_WORKSPACE_MISMATCH", + data={"agent_id": str(agent_id), "workspace_id": str(workspace_id)}, + ) + + return await self._create_and_fire_draft( + agent=agent, + version=version, + workspace_id=workspace_id, + prompt=prompt, + trigger_medium="ui", + run_purpose="draft_test", + user_id=user_id, + input_payload=input_payload, + ) + + async def dispatch_copilot_draft( + self, + agent_id: uuid.UUID, + version_id: uuid.UUID, + workspace_id: uuid.UUID, + prompt: str, + user_id: str, + graph_context: dict, + conversation_history: list | None = None, + mode: str = "deepagents", + provider_name: str | None = None, + model_name: str | None = None, + ) -> AgentRun: + """Dispatch a copilot interaction against a draft AgentVersion.""" + version = await self._get_version(version_id) + if version.agent_id != agent_id: + raise InvalidRequestError( + "Version does not belong to this agent", + code="AGENT_VERSION_AGENT_MISMATCH", + data={"agent_id": str(agent_id), "version_id": str(version_id)}, + ) + + agent = await self._get_agent(agent_id) + if agent.workspace_id != workspace_id: + raise InvalidRequestError( + "Agent does not belong to this workspace", + code="AGENT_WORKSPACE_MISMATCH", + data={"agent_id": str(agent_id), "workspace_id": str(workspace_id)}, + ) + + copilot_payload = self._build_copilot_payload( + agent_id=agent_id, + user_id=user_id, + graph_context=graph_context, + conversation_history=conversation_history, + mode=mode, + provider_name=provider_name, + model_name=model_name, + ) + + return await self._create_and_fire_draft( + agent=agent, + version=version, + workspace_id=workspace_id, + prompt=prompt, + trigger_medium="ui", + run_purpose="internal_builder", + user_id=user_id, + input_payload=copilot_payload, + engine_kind_override="build_copilot", + definition_kind_override="build_copilot", + definition_payload_override=copilot_payload, + ) + + async def dispatch_debug( + self, + agent_id: uuid.UUID, + version_id: uuid.UUID, + prompt: str, + user_id: str, + workspace_id: uuid.UUID, + variables: dict | None = None, + ) -> AgentRun: + """Dispatch a debug run with observation tracing.""" + version = await self._get_version(version_id) + if version.agent_id != agent_id: + raise InvalidRequestError( + "Version does not belong to this agent", + code="AGENT_VERSION_AGENT_MISMATCH", + data={"agent_id": str(agent_id), "version_id": str(version_id)}, + ) + + agent = await self._get_agent(agent_id) + if agent.workspace_id != workspace_id: + raise InvalidRequestError( + "Agent does not belong to this workspace", + code="AGENT_WORKSPACE_MISMATCH", + data={"agent_id": str(agent_id), "workspace_id": 
str(workspace_id)}, + ) + + run = await self._create_and_fire_draft( + agent=agent, + version=version, + workspace_id=workspace_id, + prompt=prompt, + trigger_medium="ui", + run_purpose="debug", + user_id=user_id, + input_payload={"debug": True, "variables": variables or {}}, + debug=True, + ) + + # Create Trace record for observation tracking + from datetime import datetime, timezone + + from app.core.observation.model import Trace + + if run.current_execution_id: + trace = Trace( + id=run.current_execution_id, + name=agent.name, + workspace_id=workspace_id, + start_time=datetime.now(timezone.utc), + status="running", + execution_id=run.current_execution_id, + agent_version_id=version_id, + user_id=uuid.UUID(user_id) if isinstance(user_id, str) else user_id, + session_id=f"debug-{user_id}-{version_id}-{datetime.now(timezone.utc).date()}", + input={"prompt": prompt, "variables": variables or {}}, + ) + self.db.add(trace) + await self.db.commit() + + return run + + def _resolve_engine(self, execution: Execution, release: AgentRelease): + return engine_registry.get(execution.engine_kind) + + def _resolve_draft_engine_kind(self, version: AgentVersion) -> str: + return version.engine_kind + + def _build_copilot_payload( + self, + *, + agent_id: uuid.UUID, + user_id: str, + graph_context: dict, + conversation_history: list | None, + mode: str, + provider_name: str | None, + model_name: str | None, + ) -> dict: + return { + "graph_context": graph_context, + "conversation_history": conversation_history, + "mode": mode, + "provider_name": provider_name, + "model_name": model_name, + "user_id": user_id, + "graph_id": str(agent_id), + } + + # ------------------------------------------------------------------ + # Cancel / Retry / Message / Event helpers + # ------------------------------------------------------------------ + + async def emit_user_message( + self, + *, + run: AgentRun, + execution_id: uuid.UUID, + message: str, + attachments: list[dict] | None = None, + ) -> None: + """Emit a USER_MESSAGE event for the given execution.""" + payload: dict = {"text": message} + if attachments: + payload["attachments"] = attachments + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=run.id, + workspace_id=run.workspace_id, + event_type=ExecutionEventType.USER_MESSAGE, + payload=payload, + trigger_medium=run.trigger_medium, + run_purpose=run.run_purpose, + thread_id=run.thread_id, + task_id=run.task_id, + ) + await execution_event_bus.publish(envelope, self.db) + + async def cancel_run(self, run_id: uuid.UUID) -> AgentRun: + """Cancel a running execution.""" + run = await self._get_run(run_id) + if run.status in ("succeeded", "failed", "cancelled"): + raise InvalidRequestError( + f"Cannot cancel run in status {run.status}", + code="RUN_CANCEL_STATUS_INVALID", + data={"run_id": str(run_id), "status": run.status}, + ) + + execution_id = run.current_execution_id or uuid.UUID(int=0) + + if run.current_execution_id: + execution = ( + await self.db.execute(select(Execution).where(Execution.id == run.current_execution_id)) + ).scalar_one_or_none() + if execution: + if run.release_id: + release = await self._get_release(run.release_id) + engine = self._resolve_engine(execution, release) + elif run.agent_version_id: + version = await self._get_version(run.agent_version_id) + engine = engine_registry.get(self._resolve_draft_engine_kind(version)) + else: + raise InvalidRequestError( + "Run has neither release_id nor agent_version_id", + code="RUN_BINDING_INVALID", + data={"run_id": 
str(run_id)}, + ) + await engine.cancel(execution.id) + + await execution_event_bus.publish( + ExecutionEventEnvelope( + execution_id=execution_id, + run_id=run.id, + workspace_id=run.workspace_id, + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={"status": "cancelled"}, + terminal_status="cancelled", + ), + self.db, + ) + await self.db.refresh(run) + return run + + async def retry_run(self, run_id: uuid.UUID, user_id: str) -> AgentRun: + """Retry a failed/cancelled run with a new Execution attempt.""" + run = await self._get_run(run_id) + if run.status not in ("failed", "cancelled"): + raise InvalidRequestError( + "Can only retry failed or cancelled runs", + code="RUN_RETRY_STATUS_INVALID", + data={"run_id": str(run_id), "status": run.status}, + ) + if not run.release_id: + raise InvalidRequestError( + "Draft Test Lab runs cannot be retried", + code="RUN_RETRY_DRAFT_FORBIDDEN", + data={"run_id": str(run_id)}, + ) + + release = await self._get_release(run.release_id) + version = await self._get_version(release.agent_version_id) + agent = await self._get_agent(version.agent_id) + + # Create new execution attempt + from sqlalchemy import func + + max_attempt = ( + await self.db.execute( + select(func.coalesce(func.max(Execution.attempt_index), 0)).where(Execution.run_id == run_id) + ) + ).scalar() or 0 + + execution = Execution( + run_id=run_id, + attempt_index=max_attempt + 1, + engine_kind=version.engine_kind, + status="pending", + ) + self.db.add(execution) + await self.db.flush() + + run.current_execution_id = execution.id + await self.db.flush() + + await self.publish_run_status_change( + self.db, + run, + execution_id=execution.id, + target_status="running", + ) + await self.db.commit() + + # Fire engine in background + await self._fire_engine( + execution=execution, + release=release, + version=version, + agent=agent, + workspace_id=run.workspace_id, + prompt=run.goal or "", + ) + + await self.db.refresh(run) + return run + + async def send_message(self, execution_id: uuid.UUID, message: str) -> None: + """Inject a message into a running execution.""" + execution = (await self.db.execute(select(Execution).where(Execution.id == execution_id))).scalar_one_or_none() + if not execution: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + + run = await self._get_run(execution.run_id) + if run.release_id: + release = await self._get_release(run.release_id) + engine = self._resolve_engine(execution, release) + elif run.agent_version_id: + version = await self._get_version(run.agent_version_id) + engine = engine_registry.get(self._resolve_draft_engine_kind(version)) + else: + raise InvalidRequestError( + "Run has neither release_id nor agent_version_id", + code="RUN_BINDING_INVALID", + data={"run_id": str(run.id)}, + ) + if not engine.capabilities.supports_message_injection: + raise InvalidRequestError( + "Execution engine does not support message injection", + code="EXECUTION_OPERATION_UNSUPPORTED", + data={ + "operation": "send_message", + "engine_kind": getattr(engine, "engine_kind", execution.engine_kind), + "execution_id": str(execution_id), + }, + ) + await engine.send_message(execution_id, message) + + # ------------------------------------------------------------------ + # Internal: create Run + Execution, fire engine + # ------------------------------------------------------------------ + + async def _create_and_fire( + self, + agent: Agent, + release_id: uuid.UUID, + workspace_id: uuid.UUID, + 
prompt: str, + trigger_medium: str, + run_purpose: str, + user_id: str, + thread_id: uuid.UUID | None = None, + task_id: uuid.UUID | None = None, + input_payload: dict | None = None, + *, + engine_kind_override: str | None = None, + definition_kind_override: str | None = None, + definition_payload_override: dict | None = None, + ) -> AgentRun: + release = await self._get_release(release_id) + version = await self._get_version(release.agent_version_id) + + # Create Run in pending state — bus will transition to running + run = AgentRun( + release_id=release_id, + workspace_id=workspace_id, + thread_id=thread_id, + task_id=task_id, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + goal=prompt[:500] if prompt else None, + input_payload=input_payload, + status="pending", + created_by=user_id, + ) + self.db.add(run) + await self.db.flush() + + execution = Execution( + run_id=run.id, + attempt_index=1, + engine_kind=engine_kind_override or version.engine_kind, + status="pending", + ) + self.db.add(execution) + await self.db.flush() + + run.current_execution_id = execution.id + await self.db.commit() + + await self.publish_run_status_change( + self.db, + run, + execution_id=execution.id, + target_status="running", + ) + await self.db.refresh(run) + + # Fire engine in background + try: + await self._fire_engine( + execution=execution, + release=release, + version=version, + agent=agent, + workspace_id=workspace_id, + prompt=prompt, + engine_kind_override=engine_kind_override, + definition_kind_override=definition_kind_override, + definition_payload_override=definition_payload_override, + ) + except Exception as exc: + logger.error(f"[Orchestrator] _fire_engine failed: {exc}") + app_error = normalize_app_error(exc, default_code="EXECUTION_FAILED", source="engine") + error_payload = app_error.to_payload() + error_payload.setdefault("data", {})["reason"] = "engine_fire_failed" + await execution_event_bus.publish( + ExecutionEventEnvelope( + execution_id=execution.id, + run_id=run.id, + workspace_id=workspace_id, + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={ + "status": "failed", + "error": error_payload, + "result_summary": str(exc)[:2000], + }, + terminal_status="failed", + error=error_payload, + result_summary=str(exc)[:2000], + ), + self.db, + ) + await self.db.refresh(run) + + return run + + async def _create_and_fire_draft( + self, + agent: Agent, + version: AgentVersion, + workspace_id: uuid.UUID, + prompt: str, + trigger_medium: str, + run_purpose: str, + user_id: str, + input_payload: dict | None = None, + *, + debug: bool = False, + engine_kind_override: str | None = None, + definition_kind_override: str | None = None, + definition_payload_override: dict | None = None, + ) -> AgentRun: + self._validate_draft_overrides( + engine_kind_override=engine_kind_override, + definition_kind_override=definition_kind_override, + definition_payload_override=definition_payload_override, + ) + runtime_binding: dict = {} + + run = AgentRun( + release_id=None, + agent_version_id=version.id, + workspace_id=workspace_id, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + goal=prompt[:500] if prompt else None, + input_payload=input_payload, + status="pending", + created_by=user_id, + ) + self.db.add(run) + await self.db.flush() + + execution = Execution( + run_id=run.id, + attempt_index=1, + engine_kind=engine_kind_override or version.engine_kind, + status="pending", + ) + self.db.add(execution) + await self.db.flush() + + run.current_execution_id = execution.id + await 
self.db.commit() + + await self.publish_run_status_change( + self.db, + run, + execution_id=execution.id, + target_status="running", + ) + await self.db.refresh(run) + + try: + await self._fire_engine( + execution=execution, + release_runtime_binding=runtime_binding, + version=version, + agent=agent, + workspace_id=workspace_id, + prompt=prompt, + engine_kind_override=engine_kind_override, + definition_kind_override=definition_kind_override, + definition_payload_override=definition_payload_override, + debug=debug, + ) + except Exception as exc: + logger.error(f"[Orchestrator] _fire_engine failed for draft run: {exc}") + app_error = normalize_app_error(exc, default_code="EXECUTION_FAILED", source="engine") + error_payload = app_error.to_payload() + error_payload.setdefault("data", {})["reason"] = "engine_fire_failed" + await execution_event_bus.publish( + ExecutionEventEnvelope( + execution_id=execution.id, + run_id=run.id, + workspace_id=workspace_id, + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={ + "status": "failed", + "error": error_payload, + "result_summary": str(exc)[:2000], + }, + terminal_status="failed", + error=error_payload, + result_summary=str(exc)[:2000], + ), + self.db, + ) + await self.db.refresh(run) + + return run + + def _validate_draft_overrides( + self, + *, + engine_kind_override: str | None, + definition_kind_override: str | None, + definition_payload_override: dict | None, + ) -> None: + override_presence = ( + engine_kind_override is not None, + definition_kind_override is not None, + definition_payload_override is not None, + ) + if any(override_presence) and not all(override_presence): + raise InvalidRequestError( + "Draft override parameters must be all absent or all present.", + code="DRAFT_OVERRIDE_PARAMETERS_INVALID", + data={ + "engine_kind_override": engine_kind_override is not None, + "definition_kind_override": definition_kind_override is not None, + "definition_payload_override": definition_payload_override is not None, + }, + ) + + async def _fire_engine( + self, + execution: Execution, + version: AgentVersion, + agent: Agent, + workspace_id: uuid.UUID, + prompt: str, + *, + release: AgentRelease | None = None, + release_runtime_binding: dict | None = None, + engine_kind_override: str | None = None, + definition_kind_override: str | None = None, + definition_payload_override: dict | None = None, + debug: bool = False, + ) -> None: + """Build context and fire engine in a background task.""" + credentials = build_agent_credentials(agent) + + # Resolve auto_approve from task if linked + auto_approve = True + run = (await self.db.execute(select(AgentRun).where(AgentRun.id == execution.run_id))).scalar_one() + if run.task_id: + task = (await self.db.execute(select(Task).where(Task.id == run.task_id))).scalar_one_or_none() + if task: + auto_approve = task.auto_approve + + context = ExecutionContext( + db=self.db, + execution_id=execution.id, + run_id=run.id, + workspace_id=workspace_id, + credentials=credentials, + auto_approve=auto_approve, + ) + + # Wire context callbacks (pass run metadata to avoid extra DB query) + run_meta = dict( + trigger_medium=run.trigger_medium, + run_purpose=run.run_purpose, + thread_id=run.thread_id, + task_id=run.task_id, + ) + self._wire_context(context, **run_meta) # type: ignore[arg-type] + + runtime_binding = release_runtime_binding or (release.runtime_binding if release else {}) + engine = engine_registry.get(engine_kind_override or execution.engine_kind) + _def_kind = definition_kind_override or 
version.engine_kind + _def_payload = definition_payload_override or version.definition_payload + + async def _run_engine(): + from app.core.database import AsyncSessionLocal + + try: + async with AsyncSessionLocal() as db: + # Rebuild context with fresh session + ctx = ExecutionContext( + db=db, + execution_id=execution.id, + run_id=run.id, + workspace_id=workspace_id, + credentials=credentials, + auto_approve=auto_approve, + ) + self._wire_context(ctx, **run_meta) + + collector = None + if debug: + from app.core.observation import ObservationCollector + from app.core.observation.types import ObservationLevel + from app.websocket.execution_subscription_manager import execution_subscription_manager + + async def _db_factory(): + return db + + async def _broadcast(exec_id: Any, message: dict) -> None: + await execution_subscription_manager.broadcast_event(str(exec_id), message) + + collector = ObservationCollector( + trace_id=execution.id, + execution_id=execution.id, + workspace_id=workspace_id, + db_session_factory=_db_factory, + broadcast_fn=_broadcast, + ) + ctx.debug = True + ctx.collector = collector + + try: + await engine.start( + ctx, + release_runtime_binding=runtime_binding, + engine_kind=_def_kind, + definition_payload=_def_payload, + prompt=prompt, + ) + except Exception as exc: + if collector: + collector.record_event( + f"error:{type(exc).__name__}", + input={"message": str(exc)}, + level=ObservationLevel.ERROR, + ) + raise + finally: + if collector: + await collector.finalize() + except Exception as exc: + logger.error(f"[Orchestrator] Engine failed for execution {execution.id}: {exc}") + try: + app_error = normalize_app_error( + exc, + default_code="EXECUTION_ENGINE_FAILED", + default_message="Engine execution failed", + default_data={"execution_id": str(execution.id), "run_id": str(run.id)}, + source="engine", + ) + await ctx._complete_fn("failed", app_error.message[:2000], app_error) + except Exception as cleanup_exc: + logger.error(f"[Orchestrator] Failed to mark execution as failed: {cleanup_exc}") + + safe_create_task(_run_engine(), name=f"engine-{execution.id}") + + def _wire_context( + self, + ctx: ExecutionContext, + *, + trigger_medium: str | None = None, + run_purpose: str | None = None, + thread_id: uuid.UUID | None = None, + task_id: uuid.UUID | None = None, + ) -> None: + """Attach emit/status/complete callbacks to context. + + Run metadata is passed in directly by the caller (who already has the + run object), avoiding an extra DB query. 
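+
+        Illustrative use of the wired callbacks from engine code (a sketch of
+        a hypothetical call site; the callbacks themselves are defined below):
+
+            await ctx._status_fn("running")
+            await ctx._emit_fn(ExecutionEventType.USER_MESSAGE, {"text": "hi"})
+            await ctx._complete_fn("succeeded", result_summary="done")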
+ """ + + def _envelope(**overrides: Any) -> ExecutionEventEnvelope: + return ExecutionEventEnvelope( + execution_id=ctx.execution_id, + run_id=ctx.run_id, + workspace_id=ctx.workspace_id, + trigger_medium=trigger_medium, + run_purpose=run_purpose, + thread_id=thread_id, + task_id=task_id, + **overrides, + ) + + async def _emit(event_type: ExecutionEventType, payload: dict) -> None: + await execution_event_bus.publish( + _envelope(event_type=event_type, payload=payload), + ctx.db, + ) + + async def _status(status: str) -> None: + await execution_event_bus.publish( + _envelope( + event_type=ExecutionEventType.EXECUTION_STATUS_CHANGE, + payload={"status": status}, + target_status=status, + ), + ctx.db, + ) + + async def _complete( + status: str, + result_summary: str | None = None, + error: AppError | None = None, + ) -> None: + error_payload = error.to_payload() if error is not None else None + await execution_event_bus.publish( + _envelope( + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={ + "status": status, + "error": error_payload, + }, + terminal_status=status, + error=error_payload, + result_summary=result_summary, + ), + ctx.db, + ) + + ctx._emit_fn = _emit + ctx._status_fn = _status + ctx._complete_fn = _complete + + # ------------------------------------------------------------------ + # Internal: helpers + # ------------------------------------------------------------------ + + @staticmethod + async def publish_run_status_change( + db: AsyncSession, + run: AgentRun, + *, + execution_id: uuid.UUID, + target_status: str, + result_summary: str | None = None, + ) -> None: + """Publish a RUN_STATUS_CHANGE event through the bus.""" + await execution_event_bus.publish( + ExecutionEventEnvelope( + execution_id=execution_id, + run_id=run.id, + workspace_id=run.workspace_id, + event_type=ExecutionEventType.RUN_STATUS_CHANGE, + payload={"status": target_status}, + target_status=target_status, + result_summary=result_summary, + trigger_medium=run.trigger_medium, + run_purpose=run.run_purpose, + thread_id=run.thread_id, + task_id=run.task_id, + ), + db, + ) + + async def _get_task(self, task_id: uuid.UUID) -> Task: + result = (await self.db.execute(select(Task).where(Task.id == task_id))).scalar_one_or_none() + if not result: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + return result + + async def _get_agent(self, agent_id: uuid.UUID) -> Agent: + result = (await self.db.execute(select(Agent).where(Agent.id == agent_id))).scalar_one_or_none() + if not result: + raise NotFoundError("Agent not found", code="AGENT_NOT_FOUND", data={"agent_id": str(agent_id)}) + return result + + async def _get_release(self, release_id: uuid.UUID) -> AgentRelease: + result = (await self.db.execute(select(AgentRelease).where(AgentRelease.id == release_id))).scalar_one_or_none() + if not result: + raise NotFoundError( + "Agent release not found", + code="AGENT_RELEASE_NOT_FOUND", + data={"release_id": str(release_id)}, + ) + return result + + async def _get_version(self, version_id: uuid.UUID) -> AgentVersion: + result = (await self.db.execute(select(AgentVersion).where(AgentVersion.id == version_id))).scalar_one_or_none() + if not result: + raise NotFoundError( + "Agent version not found", + code="AGENT_VERSION_NOT_FOUND", + data={"version_id": str(version_id)}, + ) + return result + + async def _get_run(self, run_id: uuid.UUID) -> AgentRun: + result = (await self.db.execute(select(AgentRun).where(AgentRun.id == run_id))).scalar_one_or_none() + if 
not result: + raise NotFoundError("Agent run not found", code="AGENT_RUN_NOT_FOUND", data={"run_id": str(run_id)}) + return result diff --git a/backend/app/services/execution_reader_adapter.py b/backend/app/services/execution_reader_adapter.py new file mode 100644 index 000000000..4354e9a6a --- /dev/null +++ b/backend/app/services/execution_reader_adapter.py @@ -0,0 +1,62 @@ +""" +ExecutionReaderAdapter — implements ExecutionReaderPort. + +Wraps the DB queries that ExecutionRunner previously did inline, +so core/ no longer needs direct ORM access. +""" + +from __future__ import annotations + +import uuid +from typing import Optional + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import NotFoundError +from app.models.agent import AgentRelease +from app.models.agent_run import AgentRun +from app.models.execution import Execution +from app.models.task import Task + + +class ExecutionReaderAdapter: + """Implements ExecutionReaderPort — read-only DB queries for core/.""" + + def __init__(self, db: AsyncSession): + self.db = db + + async def get_execution(self, execution_id: uuid.UUID) -> Execution: + result = await self.db.execute(select(Execution).where(Execution.id == execution_id)) + execution = result.scalar_one_or_none() + if not execution: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + return execution + + async def get_run_for_execution(self, execution_id: uuid.UUID) -> AgentRun: + result = await self.db.execute( + select(AgentRun).join(Execution, Execution.run_id == AgentRun.id).where(Execution.id == execution_id) + ) + run = result.scalar_one_or_none() + if not run: + raise NotFoundError( + "Agent run not found for execution", + code="AGENT_RUN_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + return run + + async def get_release_for_run(self, run_id: uuid.UUID) -> Optional[AgentRelease]: + result = await self.db.execute( + select(AgentRelease).join(AgentRun, AgentRun.release_id == AgentRelease.id).where(AgentRun.id == run_id) + ) + return result.scalar_one_or_none() + + async def get_task_auto_approve(self, task_id: uuid.UUID) -> bool: + result = await self.db.execute(select(Task.auto_approve).where(Task.id == task_id)) + val = result.scalar_one_or_none() + return val if val is not None else True diff --git a/backend/app/services/execution_service.py b/backend/app/services/execution_service.py new file mode 100644 index 000000000..a31947fc0 --- /dev/null +++ b/backend/app/services/execution_service.py @@ -0,0 +1,402 @@ +""" +Service layer for CLI agent executions. 
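+
+Illustrative usage (a sketch; ``db`` and the IDs are assumed to come from
+the caller):
+
+    service = ExecutionService(db)
+    service.set_event_context(EventContext(run_id=run_id, workspace_id=workspace_id))
+    await service.mark_status(execution_id=execution_id, status="running")
+    events = await service.list_events(execution_id)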
+""" + +from __future__ import annotations + +import uuid +from datetime import timedelta +from typing import Any, List, Mapping, Optional, cast + +from loguru import logger +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InternalServiceError, NotFoundError +from app.core.events import ExecutionEventEnvelope, execution_event_bus +from app.core.events.event_types import ExecutionEventType +from app.core.ports.execution import EventContext +from app.core.state_machines.definitions import EXECUTION_TERMINAL +from app.models.execution import ( + Execution, + ExecutionEvent, +) +from app.repositories.execution import ExecutionEventRepository, ExecutionRepository +from app.utils.datetime import utc_now + +TERMINAL_EXECUTION_STATUSES = EXECUTION_TERMINAL + + +class ExecutionService: + """Manages execution lifecycle and event appending.""" + + def __init__(self, db: AsyncSession): + self.db = db + self.repo = ExecutionRepository(db) + self.event_repo = ExecutionEventRepository(db) + self._event_ctx: Optional[EventContext] = None + + def set_event_context(self, ctx: EventContext) -> None: + """Inject run-level metadata so append_event can build full envelopes.""" + self._event_ctx = ctx + + async def get_execution_internal(self, execution_id: uuid.UUID) -> Optional[Execution]: + """Internal use — no user-scope check, no FOR UPDATE lock.""" + result = await self.db.execute(select(Execution).where(Execution.id == execution_id)) + return result.scalar_one_or_none() + + async def get_execution(self, execution_id: uuid.UUID, user_id: Optional[str] = None) -> Optional[Execution]: + """Get execution by ID (user_id kept for API compatibility; no row-level auth here). + + Raises NotFoundError when called without user_id (API path) so callers get a clean 404. + When user_id is provided (WebSocket path) returns None on miss. + """ + execution = await self.get_execution_internal(execution_id) + if execution is None and user_id is None: + raise NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + return execution + + async def list_events_after( + self, execution_id: uuid.UUID, user_id: str, after_seq: int = 0, limit: int = 500 + ) -> list[ExecutionEvent]: + execution = await self.get_execution_internal(execution_id) + if not execution: + return [] + result = await self.db.execute( + select(ExecutionEvent) + .where( + ExecutionEvent.execution_id == execution_id, + ExecutionEvent.sequence_no > after_seq, + ) + .order_by(ExecutionEvent.sequence_no.asc()) + .limit(limit) + ) + return list(result.scalars().all()) + + async def mark_status( + self, + *, + execution_id: uuid.UUID, + status: str, + container_id: Optional[str] = None, + session_id: Optional[str] = None, + error: Mapping[str, Any] | None = None, + result_summary: Optional[dict[str, Any]] = None, + ) -> Optional[Execution]: + """Publish a status-change event through the bus. + + StateTransitionSubscriber handles the actual DB transition and + metadata writes in Phase 1 of the bus pipeline. + """ + ctx = self._event_ctx + if ctx is None: + # Fallback for callers without event context (e.g. 
reaper) + from app.models.agent_run import AgentRun + + result = await self.db.execute( + select(Execution, AgentRun.workspace_id) + .join(AgentRun, Execution.run_id == AgentRun.id) + .where(Execution.id == execution_id) + ) + row = result.one_or_none() + if not row: + return None + execution, ws_id = row + ctx = EventContext( + run_id=execution.run_id, + workspace_id=ws_id, + ) + else: + execution = None + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=ctx.run_id, + workspace_id=ctx.workspace_id, + event_type=ExecutionEventType.EXECUTION_STATUS_CHANGE, + payload={"status": status}, + trigger_medium=ctx.trigger_medium, + run_purpose=ctx.run_purpose, + thread_id=ctx.thread_id, + task_id=ctx.task_id, + target_status=status, + error=dict(error) if error is not None else None, + container_id=container_id or session_id, + metrics=result_summary, + ) + await execution_event_bus.publish(envelope, self.db) + + # Return the updated row + if execution is None: + execution = cast( + Optional[Execution], + (await self.db.execute(select(Execution).where(Execution.id == execution_id))).scalar_one_or_none(), + ) + else: + await self.db.refresh(execution) + return cast(Optional[Execution], execution) + + async def append_event( + self, + *, + execution_id: uuid.UUID, + event_type: ExecutionEventType, + payload: dict[str, Any], + ) -> ExecutionEvent: + if self._event_ctx is None: + raise InternalServiceError( + "Execution event context is not initialized", + code="EXECUTION_EVENT_CONTEXT_MISSING", + data={"execution_id": str(execution_id)}, + ) + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=self._event_ctx.run_id, + workspace_id=self._event_ctx.workspace_id, + event_type=event_type, + payload=payload, + trigger_medium=self._event_ctx.trigger_medium, + run_purpose=self._event_ctx.run_purpose, + thread_id=self._event_ctx.thread_id, + task_id=self._event_ctx.task_id, + ) + await execution_event_bus.publish(envelope, self.db) + + return ExecutionEvent( + execution_id=execution_id, + sequence_no=envelope.seq, + event_type=event_type, + payload=payload, + ) + + async def batch_append_events( + self, + *, + execution_id: uuid.UUID, + events: list[dict[str, Any]], + ) -> list[ExecutionEvent]: + """Append multiple events in a single transaction via the event bus.""" + if self._event_ctx is None: + raise InternalServiceError( + "Execution event context is not initialized", + code="EXECUTION_EVENT_CONTEXT_MISSING", + data={"execution_id": str(execution_id)}, + ) + + envelopes = [ + ExecutionEventEnvelope( + execution_id=execution_id, + run_id=self._event_ctx.run_id, + workspace_id=self._event_ctx.workspace_id, + event_type=evt["event_type"], + payload=evt["payload"], + trigger_medium=self._event_ctx.trigger_medium, + run_purpose=self._event_ctx.run_purpose, + thread_id=self._event_ctx.thread_id, + task_id=self._event_ctx.task_id, + ) + for evt in events + ] + await execution_event_bus.publish_batch(envelopes, self.db) + + return [ + ExecutionEvent( + execution_id=execution_id, + sequence_no=env.seq, + event_type=env.event_type, + payload=env.payload, + ) + for env in envelopes + ] + + async def complete_execution( + self, + *, + execution_id: uuid.UUID, + terminal_status: str, + result_summary: dict | None = None, + error: Mapping[str, Any] | None = None, + session_id: str | None = None, + ) -> None: + """Publish a single EXECUTION_COMPLETED event with full metadata. + + StateTransitionSubscriber handles Execution + Run terminal transitions. 
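+
+        Example (illustrative values; the ``result_summary`` keys are
+        placeholders):
+
+            await service.complete_execution(
+                execution_id=execution_id,
+                terminal_status="succeeded",
+                result_summary={"total_tokens": 1234},
+            )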
+ """ + if self._event_ctx is None: + raise InternalServiceError( + "Execution event context is not initialized", + code="EXECUTION_EVENT_CONTEXT_MISSING", + data={"execution_id": str(execution_id)}, + ) + + envelope = ExecutionEventEnvelope( + execution_id=execution_id, + run_id=self._event_ctx.run_id, + workspace_id=self._event_ctx.workspace_id, + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={ + "status": terminal_status, + "error": dict(error) if error is not None else None, + "result_summary": result_summary, + }, + terminal_status=terminal_status, + error=dict(error) if error is not None else None, + container_id=session_id, + metrics=result_summary, + trigger_medium=self._event_ctx.trigger_medium, + run_purpose=self._event_ctx.run_purpose, + thread_id=self._event_ctx.thread_id, + task_id=self._event_ctx.task_id, + ) + await execution_event_bus.publish(envelope, self.db) + + async def list_executions(self, run_id: uuid.UUID) -> List[Execution]: + """List all executions for a run.""" + return await self.repo.list_by_run(run_id) + + async def list_events(self, execution_id: uuid.UUID) -> List[ExecutionEvent]: + """List all events for an execution.""" + return await self.event_repo.list_by_execution(execution_id) + + async def get_snapshot(self, execution_id: uuid.UUID, user_id: Optional[str] = None): + """Return a lightweight snapshot of the execution for WebSocket catch-up. + + Returns an object with ``last_seq`` and ``projection`` attributes. + Falls back to a synthetic snapshot built from the execution row when no + dedicated snapshot table exists. + """ + execution = await self.get_execution_internal(execution_id) + if not execution: + return None + + # Compute last_seq from the events table + result = await self.db.execute( + select(func.coalesce(func.max(ExecutionEvent.sequence_no), 0)).where( + ExecutionEvent.execution_id == execution_id + ) + ) + last_seq = result.scalar() or 0 + + # Build a minimal projection from the execution row itself + class _Snapshot: + last_seq: int + projection: dict[str, Any] + + snap = _Snapshot() + snap.last_seq = last_seq + snap.projection = { + "status": execution.status if isinstance(execution.status, str) else execution.status.value, + "started_at": execution.started_at.isoformat() if execution.started_at else None, + "ended_at": execution.ended_at.isoformat() if execution.ended_at else None, + "error": execution.error, + } + return snap + + async def create_execution( + self, + *, + run_id: uuid.UUID, + runtime_type: str = "claude_code", + parent_execution_id: Optional[uuid.UUID] = None, + ) -> Execution: + """Create an Execution record attached to an existing AgentRun.""" + max_attempt = await ExecutionRepository(self.db).get_max_attempt(run_id) + execution = Execution( + run_id=run_id, + attempt_index=max_attempt + 1, + engine_kind=runtime_type, + status="pending", + parent_execution_id=parent_execution_id, + ) + self.db.add(execution) + await self.db.commit() + await self.db.refresh(execution) + return execution + + async def reap_stale_executions( + self, + thresholds: list[tuple[tuple[str, ...], timedelta]], + ) -> int: + """Discover and reap stale executions. + + For each (statuses, threshold) pair: + 1. Query stale executions → Repository + 2. Cancel active runtime session → session_registry + 3. Mark execution as failed → self.mark_status (bus) + 4. 
Mark parent run as failed → RUN_STATUS_CHANGE (bus) + + Args: + thresholds: list of ``((status, ...), timedelta)`` pairs defining + which execution statuses to scan and how old they must be. + + Returns: + Total number of reaped executions. + """ + from app.core.agent.cli_backends.session_registry import session_registry + from app.models.agent_run import AgentRun + + now = utc_now() + total = 0 + + for statuses, threshold in thresholds: + stale = await self.repo.list_recoverable_stale( + statuses=statuses, + stale_before=now - threshold, + ) + for execution in stale: + try: + # 1. Cancel active session if any + session = session_registry.get(execution.id) + if session: + await session.cancel() + + # 2. Load run for envelope metadata + run = ( + await self.db.execute(select(AgentRun).where(AgentRun.id == execution.run_id)) + ).scalar_one_or_none() + + # 3. Atomically mark execution + run as failed + error_msg = f"No heartbeat for {int(threshold.total_seconds() // 60)}+ minutes" + stale_error = { + "code": "STALE_REAPED", + "message": error_msg, + "data": {"reason": "stale_execution"}, + "source": "runtime", + "retryable": True, + "user_action": "retry", + } + envelope = ExecutionEventEnvelope( + execution_id=execution.id, + run_id=execution.run_id, + workspace_id=run.workspace_id if run else uuid.UUID(int=0), + event_type=ExecutionEventType.EXECUTION_COMPLETED, + payload={ + "status": "failed", + "error": stale_error, + "result_summary": "Reaped: stale execution", + }, + terminal_status="failed", + error=stale_error, + result_summary="Reaped: stale execution", + trigger_medium=run.trigger_medium if run else None, + run_purpose=run.run_purpose if run else None, + thread_id=run.thread_id if run else None, + task_id=run.task_id if run else None, + ) + await execution_event_bus.publish(envelope, self.db) + + total += 1 + logger.info( + f"Reaped stale execution {execution.id} " + f"(status={execution.status}, " + f"age={now - (execution.started_at or execution.created_at)})" + ) + except Exception as exc: + logger.warning(f"Failed to reap execution {execution.id}: {exc}") + + return total diff --git a/backend/app/services/graph_deployment_version_service.py b/backend/app/services/graph_deployment_version_service.py deleted file mode 100644 index 940fb16cb..000000000 --- a/backend/app/services/graph_deployment_version_service.py +++ /dev/null @@ -1,568 +0,0 @@ -""" -Graph deployment version service. 
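A sketch of how the reaper above could be driven from a periodic task; the status names and age thresholds here are assumptions, only the ((status, ...), timedelta) pair shape comes from the method signature:

    from datetime import timedelta

    thresholds = [
        (("running",), timedelta(minutes=15)),       # no heartbeat for 15+ minutes
        (("pending", "queued"), timedelta(hours=1)),
    ]
    reaped = await execution_service.reap_stale_executions(thresholds)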
-""" - -from __future__ import annotations - -import json -import uuid -from datetime import datetime, timezone -from typing import Any, Dict, List, Optional - -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.exceptions import ForbiddenException, NotFoundException -from app.models.auth import AuthUser -from app.models.graph import AgentGraph -from app.models.graph_deployment_version import GraphDeploymentVersion -from app.models.workspace import WorkspaceMemberRole -from app.repositories.auth_user import AuthUserRepository -from app.repositories.graph import GraphEdgeRepository, GraphNodeRepository, GraphRepository -from app.repositories.graph_deployment_version import GraphDeploymentVersionRepository -from app.schemas.graph_deployment_version import ( - GraphDeploymentVersionListResponse, - GraphDeploymentVersionResponseCamel, - GraphDeploymentVersionStateResponse, - GraphDeployResponse, - GraphRevertResponse, -) - -from .base import BaseService -from .workspace_permission import check_workspace_access - - -class GraphDeploymentVersionService(BaseService): - """Graph deployment version service.""" - - def __init__(self, db: AsyncSession): - super().__init__(db) - self.version_repo = GraphDeploymentVersionRepository(db) - self.graph_repo = GraphRepository(db) - self.node_repo = GraphNodeRepository(db) - self.edge_repo = GraphEdgeRepository(db) - self.user_repo = AuthUserRepository(db) - - async def _ensure_access( - self, - graph: AgentGraph, - current_user: AuthUser, - required_role: WorkspaceMemberRole = WorkspaceMemberRole.viewer, - ) -> None: - """Ensure the user has permission to access the graph.""" - if current_user.is_superuser: - return - if graph.user_id == current_user.id: - return - if graph.workspace_id: - has_access = await check_workspace_access( - self.db, - graph.workspace_id, - current_user, - required_role, - ) - if has_access: - return - raise ForbiddenException("No access to graph") - - async def _ensure_can_deploy(self, graph: AgentGraph, current_user: AuthUser) -> None: - """Ensure the user can deploy.""" - if current_user.is_superuser: - return - if graph.user_id == current_user.id: - return - if graph.workspace_id: - has_access = await check_workspace_access( - self.db, - graph.workspace_id, - current_user, - WorkspaceMemberRole.admin, - ) - if has_access: - return - raise ForbiddenException("Only graph owner or workspace admin can deploy") - - def _normalize_graph_state(self, nodes: List, edges: List, variables: Dict[str, Any]) -> Dict[str, Any]: - """Normalize graph state — store into deployment_version.state. - - Important: deep-copy node.data to avoid serialization issues with SQLAlchemy proxy objects. - Also ensure config contains all necessary settings (e.g. model, temp, etc.) - so that a revert can fully restore the state. 
- """ - import copy - - normalized_nodes = {} - for node in nodes: - node_id = str(node.id) - - # deep-copy data to avoid SQLAlchemy proxy object serialization issues - node_data = copy.deepcopy(dict(node.data)) if node.data else {} - - # ensure config exists - if "config" not in node_data: - node_data["config"] = {} - - config = node_data.get("config", {}) - if not isinstance(config, dict): - config = {} - node_data["config"] = config - - normalized_nodes[node_id] = { - "id": node_id, - "type": node.type, - "position": { - "x": float(node.position_x) if node.position_x else 0, - "y": float(node.position_y) if node.position_y else 0, - }, - "position_absolute": { - "x": float(node.position_absolute_x) if node.position_absolute_x else None, - "y": float(node.position_absolute_y) if node.position_absolute_y else None, - }, - "width": float(node.width) if node.width else 0, - "height": float(node.height) if node.height else 0, - "data": node_data, - } - - normalized_edges = [] - for edge in edges: - normalized_edges.append( - { - "id": str(edge.id), - "source": str(edge.source_node_id), - "target": str(edge.target_node_id), - } - ) - - return { - "nodes": normalized_nodes, - "edges": normalized_edges, - "variables": variables, - "lastSaved": int(datetime.now(timezone.utc).timestamp() * 1000), - } - - def _compute_state_hash(self, state: Dict[str, Any]) -> str: - """Compute a hash of the state for quick comparison.""" - import hashlib - - # exclude lastSaved field since it differs every time - state_copy = {k: v for k, v in state.items() if k != "lastSaved"} - state_json = json.dumps(state_copy, sort_keys=True, ensure_ascii=False) - return hashlib.sha256(state_json.encode()).hexdigest()[:16] - - def _has_graph_changed(self, current_state: Dict[str, Any], deployed_state: Dict[str, Any]) -> bool: - """Check whether the graph has changed (using hash for quick comparison).""" - current_hash = self._compute_state_hash(current_state) - deployed_hash = self._compute_state_hash(deployed_state) - return current_hash != deployed_hash - - async def deploy( - self, graph_id: uuid.UUID, current_user: AuthUser, name: Optional[str] = None - ) -> GraphDeployResponse: - """Deploy a graph.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_can_deploy(graph, current_user) - - nodes = await self.node_repo.list_by_graph(graph_id) - edges = await self.edge_repo.list_by_graph(graph_id) - - current_state = self._normalize_graph_state(nodes, edges, graph.variables) - active_version = await self.version_repo.get_active_version(graph_id) - - # check if there are changes - has_changes = True - if active_version: - has_changes = self._has_graph_changed(current_state, active_version.state) - - # if no changes and already deployed, return current active version info - if not has_changes and graph.is_deployed and active_version: - return GraphDeployResponse( - success=True, - message=f"No changes detected, current version is v{active_version.version}", - version=active_version.version, - isActive=active_version.is_active, - needsRedeployment=False, - ) - - # changes detected or first deploy, create a new version - new_version = await self.version_repo.create_version( - graph_id=graph_id, - state=current_state, - created_by=str(current_user.id), - name=name, - ) - - now = datetime.now(timezone.utc) - await self.graph_repo.update( - graph_id, - { - "is_deployed": True, - "deployed_at": now, - }, - ) - - await self.db.commit() - - return 
GraphDeployResponse( - success=True, - message=f"Deployed as version {new_version.version}", - version=new_version.version, - isActive=new_version.is_active, - needsRedeployment=False, - ) - - async def undeploy(self, graph_id: uuid.UUID, current_user: AuthUser) -> Dict[str, Any]: - """Undeploy.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_can_deploy(graph, current_user) - - await self.graph_repo.update( - graph_id, - { - "is_deployed": False, - "deployed_at": None, - }, - ) - - await self.db.commit() - - return { - "isDeployed": False, - "deployedAt": None, - } - - async def get_deployment_status(self, graph_id: uuid.UUID, current_user: AuthUser) -> Dict[str, Any]: - """Get deployment status.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_access(graph, current_user) - - active_version = await self.version_repo.get_active_version(graph_id) - - nodes = await self.node_repo.list_by_graph(graph_id) - edges = await self.edge_repo.list_by_graph(graph_id) - current_state = self._normalize_graph_state(nodes, edges, graph.variables) - - needs_redeployment = False - if active_version: - needs_redeployment = self._has_graph_changed(current_state, active_version.state) - else: - needs_redeployment = True - - return { - "isDeployed": graph.is_deployed, - "deployedAt": graph.deployed_at.isoformat() if graph.deployed_at else None, - "deployment": self._to_response_camel(active_version) if active_version else None, - "needsRedeployment": needs_redeployment, - } - - async def list_versions( - self, - graph_id: uuid.UUID, - current_user: AuthUser, - page: int = 1, - page_size: int = 10, - ) -> GraphDeploymentVersionListResponse: - """Get all versions (paginated).""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_access(graph, current_user) - - versions, total = await self.version_repo.list_by_graph_paginated(graph_id, page=page, page_size=page_size) - - # batch-fetch usernames - user_ids = list(set(v.created_by for v in versions if v.created_by)) - user_names: Dict[str, str] = {} - for user_id in user_ids: - if user_id: - import uuid as uuid_lib - - try: - user_uuid = uuid_lib.UUID(user_id) if isinstance(user_id, str) else user_id - user = await self.user_repo.get(user_uuid) - if user: - user_names[user_id] = user.name - except (ValueError, TypeError): - pass - - total_pages = (total + page_size - 1) // page_size if page_size > 0 else 1 - - return GraphDeploymentVersionListResponse( - versions=[ - self._to_response_camel(v, user_names.get(v.created_by) if v.created_by else None) for v in versions - ], - total=total, - page=page, - pageSize=page_size, - totalPages=total_pages, - ) - - async def get_version( - self, graph_id: uuid.UUID, version: int, current_user: AuthUser - ) -> GraphDeploymentVersionResponseCamel: - """Get a specific version.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_access(graph, current_user) - - deployment_version = await self.version_repo.get_by_graph_and_version(graph_id, version) - if not deployment_version: - raise NotFoundException("Deployment version not found") - - return self._to_response_camel(deployment_version) - - async def get_version_state( - self, graph_id: uuid.UUID, version: int, current_user: AuthUser - ) -> 
GraphDeploymentVersionStateResponse: - """Get the full state of a specific version (including nodes, edges, etc. for preview).""" - import copy - - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_access(graph, current_user) - - deployment_version = await self.version_repo.get_by_graph_and_version(graph_id, version) - if not deployment_version: - raise NotFoundException("Deployment version not found") - - # deep-copy state, convert to frontend-expected format - state = copy.deepcopy(deployment_version.state) if deployment_version.state else {} - - # convert state nodes to frontend format (ReactFlow format) - frontend_nodes = [] - nodes_data = state.get("nodes", {}) - for node_id, node_data in nodes_data.items(): - position = node_data.get("position", {"x": 0, "y": 0}) - position_absolute = node_data.get("position_absolute", position) - - frontend_node = { - "id": node_id, - "type": "custom", # ReactFlow uses custom type - "position": position, - "positionAbsolute": { - "x": position_absolute.get("x") if position_absolute else position.get("x", 0), - "y": position_absolute.get("y") if position_absolute else position.get("y", 0), - }, - "width": node_data.get("width", 0), - "height": node_data.get("height", 0), - "data": node_data.get("data", {}), - "selected": False, - "dragging": False, - } - frontend_nodes.append(frontend_node) - - # convert edges format - frontend_edges = [] - edges_data = state.get("edges", []) - for edge_data in edges_data: - frontend_edge = { - "id": edge_data.get("id", f"edge-{edge_data.get('source')}-{edge_data.get('target')}"), - "source": edge_data.get("source"), - "target": edge_data.get("target"), - "animated": True, - "style": {"stroke": "#cbd5e1", "strokeWidth": 1.5}, - } - frontend_edges.append(frontend_edge) - - frontend_state = { - "nodes": frontend_nodes, - "edges": frontend_edges, - "variables": state.get("variables", {}), - } - - return GraphDeploymentVersionStateResponse( - id=str(deployment_version.id), - version=deployment_version.version, - name=deployment_version.name, - isActive=deployment_version.is_active, - createdAt=deployment_version.created_at.isoformat(), - createdBy=deployment_version.created_by, - state=frontend_state, - ) - - async def activate_version( - self, graph_id: uuid.UUID, version: int, current_user: AuthUser - ) -> GraphDeploymentVersionResponseCamel: - """Activate a version.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_can_deploy(graph, current_user) - - activated_version = await self.version_repo.activate_version(graph_id, version) - if not activated_version: - raise NotFoundException("Deployment version not found") - - await self.graph_repo.update( - graph_id, - { - "deployed_at": datetime.now(timezone.utc), - }, - ) - - await self.db.commit() - - return self._to_response_camel(activated_version) - - async def revert_to_version(self, graph_id: uuid.UUID, version: int, current_user: AuthUser) -> GraphRevertResponse: - """Revert to a specific version. - - Restore the full node state from the deployment version, including all settings in data.config. 
- """ - import copy - - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_can_deploy(graph, current_user) - - target_version = await self.version_repo.get_by_graph_and_version(graph_id, version) - if not target_version: - raise NotFoundException("Deployment version not found") - - # restore nodes/edges data - state = target_version.state - if not state or "nodes" not in state: - raise NotFoundException("Version state is invalid") - - # 1. delete all existing nodes and edges - await self.node_repo.delete_by_graph(graph_id) - await self.edge_repo.delete_by_graph(graph_id) - - # 2. restore nodes (using original IDs) - from app.models.graph import GraphNode - - nodes_data = state["nodes"] - for node_id, node_data in nodes_data.items(): - position = node_data.get("position", {}) - position_absolute = node_data.get("position_absolute") - - # deep-copy data to ensure data integrity - restored_data = copy.deepcopy(node_data.get("data", {})) - - node = GraphNode( - id=uuid.UUID(node_id), # use original ID - graph_id=graph_id, - type=node_data["type"], - position_x=position.get("x", 0) if position else 0, - position_y=position.get("y", 0) if position else 0, - position_absolute_x=position_absolute.get("x") if position_absolute else None, - position_absolute_y=position_absolute.get("y") if position_absolute else None, - width=node_data.get("width", 0), - height=node_data.get("height", 0), - data=restored_data, # full data (including config) - ) - self.db.add(node) - - # flush first to ensure nodes are created - await self.db.flush() - - # 3. restore edges - from app.models.graph import GraphEdge - - edges_data = state.get("edges", []) - for edge_data in edges_data: - edge = GraphEdge( - id=uuid.UUID(edge_data["id"]), # use original ID - graph_id=graph_id, - source_node_id=uuid.UUID(edge_data["source"]), - target_node_id=uuid.UUID(edge_data["target"]), - ) - self.db.add(edge) - - # 4. update variables - await self.graph_repo.update( - graph_id, - { - "variables": state.get("variables", {}), - }, - ) - - # 5. 
activate version - await self.version_repo.activate_version(graph_id, version) - - await self.graph_repo.update( - graph_id, - { - "deployed_at": datetime.now(timezone.utc), - }, - ) - - await self.db.commit() - - return GraphRevertResponse( - success=True, - message=f"Reverted to version {version}", - version=version, - is_active=True, - ) - - async def rename_version( - self, graph_id: uuid.UUID, version: int, name: str, current_user: AuthUser - ) -> GraphDeploymentVersionResponseCamel: - """Rename a version.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_access(graph, current_user) - - renamed_version = await self.version_repo.rename_version(graph_id, version, name) - if not renamed_version: - raise NotFoundException("Deployment version not found") - - await self.db.commit() - - return self._to_response_camel(renamed_version) - - async def delete_version(self, graph_id: uuid.UUID, version: int, current_user: AuthUser) -> Dict[str, Any]: - """Delete a version.""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - await self._ensure_can_deploy(graph, current_user) - - target_version = await self.version_repo.get_by_graph_and_version(graph_id, version) - if not target_version: - raise NotFoundException("Deployment version not found") - - # cannot delete the currently active version - if target_version.is_active: - raise ForbiddenException("Cannot delete the active deployment version") - - await self.version_repo.delete_version(graph_id, version) - await self.db.commit() - - return { - "success": True, - "message": f"Version {version} deleted successfully", - } - - def _to_response_camel( - self, version: GraphDeploymentVersion, created_by_name: Optional[str] = None - ) -> GraphDeploymentVersionResponseCamel: - """Convert to camelCase response format.""" - return GraphDeploymentVersionResponseCamel( - id=str(version.id), - version=version.version, - name=version.name, - isActive=version.is_active, - createdAt=version.created_at.isoformat(), - createdBy=version.created_by, - createdByName=created_by_name, - ) diff --git a/backend/app/services/graph_service.py b/backend/app/services/graph_service.py deleted file mode 100644 index 039a65af8..000000000 --- a/backend/app/services/graph_service.py +++ /dev/null @@ -1,920 +0,0 @@ -""" -Graph service. 
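The module below kept a 300-second in-memory compile cache keyed by graph id, updated_at, and a fingerprint of the effective runtime prompt context. A minimal sketch of the keying idea (helper name hypothetical; the real implementation follows):

    import hashlib
    import json

    def compile_cache_key(graph_id: str, updated_at_iso: str, runtime_ctx: dict) -> tuple[str, str, str]:
        # Deterministic serialization (sorted keys, compact separators) so
        # equal contexts always produce the same fingerprint.
        blob = json.dumps(runtime_ctx, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
        return (graph_id, updated_at_iso, hashlib.sha256(blob.encode("utf-8")).hexdigest())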
-""" - -import hashlib -import json -import time -import uuid -from typing import Any, Dict, List, Optional, Tuple - -from langgraph.graph.state import CompiledStateGraph -from loguru import logger -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException -from app.core.graph.deep_agents.builder import build_deep_agents_graph -from app.core.graph.node_secrets import ( - hydrate_nodes_a2a_secrets, - prepare_node_data_for_save, - store_a2a_auth_headers, -) -from app.core.graph.runtime_prompt_template import build_runtime_prompt_context -from app.models.auth import AuthUser -from app.models.graph import AgentGraph, GraphEdge, GraphNode, GraphNodeSecret -from app.models.workspace import WorkspaceMemberRole -from app.repositories.graph import GraphEdgeRepository, GraphNodeRepository, GraphRepository - -from .base import BaseService -from .model_service import ModelService -from .workspace_permission import check_workspace_access - -# In-memory compile cache: (graph_id, updated_at_iso, runtime_context_fingerprint) -> (compiled_graph, cached_at_ts). -_compile_cache: Dict[Tuple[str, str, str], Tuple[CompiledStateGraph, float]] = {} -_COMPILE_CACHE_TTL = 300.0 - - -def _invalidate_compile_cache(graph_id: uuid.UUID) -> None: - """Remove any cache entry for this graph (call after save).""" - to_drop = [k for k in _compile_cache if k[0] == str(graph_id)] - for k in to_drop: - _compile_cache.pop(k, None) - - -def _build_runtime_prompt_context_for_cache( - graph: AgentGraph, - *, - user_id: Optional[Any], - thread_id: Optional[str], -) -> Dict[str, Any]: - """Build effective runtime prompt context used by GraphBuilder for cache-keying.""" - return build_runtime_prompt_context(graph, user_id=user_id, thread_id=thread_id) - - -def _normalize_runtime_prompt_context_for_cache(value: Any) -> Any: - """Normalize runtime context into a JSON-serializable deterministic structure.""" - if isinstance(value, dict): - return { - str(key): _normalize_runtime_prompt_context_for_cache(value[key]) - for key in sorted(value, key=lambda item: str(item)) - } - if isinstance(value, list): - return [_normalize_runtime_prompt_context_for_cache(item) for item in value] - if isinstance(value, tuple): - return [_normalize_runtime_prompt_context_for_cache(item) for item in value] - if isinstance(value, set): - normalized_items = [_normalize_runtime_prompt_context_for_cache(item) for item in value] - return sorted( - normalized_items, - key=lambda item: json.dumps(item, sort_keys=True, separators=(",", ":"), ensure_ascii=True), - ) - if isinstance(value, (str, int, float, bool)) or value is None: - return value - return str(value) - - -def _build_runtime_aware_compile_cache_key( - graph: AgentGraph, - *, - user_id: Optional[Any], - thread_id: Optional[str], -) -> Tuple[str, str, str]: - """Build compile cache key that includes effective runtime prompt context fingerprint.""" - runtime_context = _build_runtime_prompt_context_for_cache(graph, user_id=user_id, thread_id=thread_id) - normalized_context = _normalize_runtime_prompt_context_for_cache(runtime_context) - serialized_context = json.dumps(normalized_context, sort_keys=True, separators=(",", ":"), ensure_ascii=True) - context_fingerprint = hashlib.sha256(serialized_context.encode("utf-8")).hexdigest() - updated_at_iso = graph.updated_at.isoformat() if graph.updated_at else "" - return (str(graph.id), updated_at_iso, context_fingerprint) - - -class GraphService(BaseService): - """Graph 
Service""" - - def __init__(self, db: AsyncSession): - super().__init__(db) - self.graph_repo = GraphRepository(db) - self.node_repo = GraphNodeRepository(db) - self.edge_repo = GraphEdgeRepository(db) - - async def _ensure_access( - self, - graph: AgentGraph, - current_user: AuthUser, - required_role: WorkspaceMemberRole = WorkspaceMemberRole.viewer, - ) -> None: - """ - Ensure the user has permission to access the graph. - - Args: - graph: the graph to access - current_user: current user - required_role: minimum required workspace role (only applies to workspace graphs) - - Raises: - ForbiddenException: if the user has no access - """ - # superuser has all permissions - if current_user.is_superuser: - return - - # if the user owns the graph, allow directly - if graph.user_id == current_user.id: - return - - # if it's a workspace graph, check workspace permissions - if graph.workspace_id: - has_access = await check_workspace_access( - self.db, - graph.workspace_id, - current_user, - required_role, - ) - if has_access: - return - - # no permission - raise ForbiddenException("No access to graph") - - async def _create_graph_with_id( - self, - graph_id: uuid.UUID, - name: str, - user_id: uuid.UUID, - workspace_id: Optional[uuid.UUID] = None, - description: Optional[str] = None, - ) -> AgentGraph: - """ - Create a graph with a specified ID (for upsert scenarios). - - Args: - graph_id: specified graph ID - name: graph name - user_id: user ID - workspace_id: workspace ID (optional) - description: description (optional) - - Returns: - The created graph object - """ - graph_data = { - "id": graph_id, - "name": name, - "user_id": user_id, - "workspace_id": workspace_id, - "description": description, - "is_deployed": False, - "variables": {}, - } - return await self.graph_repo.create(graph_data) - - async def create_graph( - self, - name: str, - user_id: str, - workspace_id: Optional[uuid.UUID] = None, - folder_id: Optional[uuid.UUID] = None, - parent_id: Optional[uuid.UUID] = None, - description: Optional[str] = None, - color: Optional[str] = None, - variables: Optional[Dict[str, Any]] = None, - ) -> AgentGraph: - """ - Create a new graph. 
- - Args: - name: graph name - user_id: user ID - workspace_id: workspace ID (optional) - folder_id: folder ID (optional) - parent_id: parent graph ID (optional) - description: description (optional) - color: color (optional) - variables: variables (optional) - - Returns: - The created graph object - - Raises: - NotFoundException: if the parent graph does not exist - """ - # validate parent_id exists - if parent_id: - parent_graph = await self.graph_repo.get(parent_id) - if not parent_graph: - raise NotFoundException(f"Parent graph with id {parent_id} not found") - - # validate folder_id exists and belongs to the specified workspace - if folder_id: - from app.repositories.workspace_folder import WorkflowFolderRepository - - folder_repo = WorkflowFolderRepository(self.db) - folder = await folder_repo.get(folder_id) - if not folder: - raise NotFoundException(f"Folder with id {folder_id} not found") - # if workspace_id is specified, ensure the folder belongs to that workspace - if workspace_id and folder.workspace_id != workspace_id: - raise BadRequestException(f"Folder {folder_id} does not belong to workspace {workspace_id}") - # if workspace_id is not specified, derive it from the folder - if not workspace_id: - workspace_id = folder.workspace_id - - graph_data = { - "name": name, - "user_id": user_id, - "workspace_id": workspace_id, - "folder_id": folder_id, - "parent_id": parent_id, - "description": description, - "color": color, - "is_deployed": False, - "variables": variables or {}, - } - return await self.graph_repo.create(graph_data) - - async def save_graph_state( - self, - graph_id: uuid.UUID, - nodes: List[Dict[str, Any]], - edges: List[Dict[str, Any]], - viewport: Optional[Dict[str, Any]] = None, - variables: Optional[Dict[str, Any]] = None, - current_user: Optional[AuthUser] = None, - # upsert params - name: Optional[str] = None, - workspace_id: Optional[uuid.UUID] = None, - ) -> Dict[str, Any]: - """ - Save the complete graph state (nodes and edges) — supports upsert mode. - - If the graph does not exist and a name parameter is provided, automatically create a new graph. - - Frontend format: - { - "nodes": [...], - "edges": [...], - "viewport": {...}, - ... 
- } - """ - # use a transaction to ensure atomicity: all operations succeed or all fail - # check if already in a transaction to avoid starting a duplicate - if self.db.in_transaction(): - # already in a transaction, execute directly - return await self._save_graph_state_internal( - graph_id=graph_id, - nodes=nodes, - edges=edges, - viewport=viewport, - variables=variables, - current_user=current_user, - name=name, - workspace_id=workspace_id, - ) - else: - # not in a transaction, start a new one - async with self.db.begin(): - return await self._save_graph_state_internal( - graph_id=graph_id, - nodes=nodes, - edges=edges, - viewport=viewport, - variables=variables, - current_user=current_user, - name=name, - workspace_id=workspace_id, - ) - - async def _save_graph_state_internal( - self, - graph_id: uuid.UUID, - nodes: List[Dict[str, Any]], - edges: List[Dict[str, Any]], - viewport: Optional[Dict[str, Any]] = None, - variables: Optional[Dict[str, Any]] = None, - current_user: Optional[AuthUser] = None, - # upsert params - name: Optional[str] = None, - workspace_id: Optional[uuid.UUID] = None, - ) -> Dict[str, Any]: - """Internal method: execute the actual save graph state logic.""" - # get the graph - graph = await self.graph_repo.get(graph_id) - if graph: - # permission check: ensure user has write access to the existing graph - if current_user: - await self._ensure_access(graph, current_user, WorkspaceMemberRole.member) - if not graph: - # upsert mode: if the graph does not exist, auto-create a new graph - if current_user: - # if no workspace_id provided, find the user's default workspace - if not workspace_id: - from app.repositories.workspace import WorkspaceRepository - - workspace_repo = WorkspaceRepository(self.db) - workspace = await workspace_repo.get_by_name_and_owner( - name="Default Workspace", - owner_id=current_user.id, - ) - if workspace: - workspace_id = workspace.id - - # use default name if none provided - graph_name = name or "Untitled Graph" - - import uuid as uuid_lib - - user_uuid = uuid_lib.UUID(current_user.id) if isinstance(current_user.id, str) else current_user.id - graph = await self._create_graph_with_id( - graph_id=graph_id, - name=graph_name, - user_id=user_uuid, - workspace_id=workspace_id, - ) - else: - raise NotFoundException("Graph not found") - - # load existing nodes, build frontend-ID-to-database-ID mapping - existing_nodes = await self.node_repo.list_by_graph(graph_id) - existing_node_map: Dict[str, GraphNode] = {} - for node in existing_nodes: - # frontend uses the database UUID string form as node ID - frontend_id = str(node.id) - existing_node_map[frontend_id] = node - - # create node mapping (frontend ID -> database UUID) - node_id_map: Dict[str, uuid.UUID] = {} - nodes_to_create: List[Dict[str, Any]] = [] - nodes_to_update: List[Tuple[uuid.UUID, Dict[str, Any]]] = [] - - # save nodes - for node_data in nodes: - # convert frontend node format to database format - node_id = node_data.get("id") - if not node_id: - continue - - # try to parse frontend ID as UUID; if successful and node exists, update; otherwise create new node - db_node_id: uuid.UUID - try: - # try to parse frontend ID as UUID - node_id_str = str(node_id) - parsed_uuid = uuid.UUID(node_id_str) - if str(parsed_uuid) in existing_node_map: - # node exists, update - db_node_id = parsed_uuid - nodes_to_update.append((db_node_id, node_data)) - else: - # UUID format but node does not exist, create new node - db_node_id = uuid.uuid4() - nodes_to_create.append(node_data) - except 
(ValueError, AttributeError): - # frontend ID is not UUID format (e.g. node_xxx), create new node - db_node_id = uuid.uuid4() - nodes_to_create.append(node_data) - - node_id_map[node_id] = db_node_id - - # delete nodes that no longer exist and all edges (edges will be recreated later) - # build database UUID set to determine which nodes to delete - # node_id_map values are database UUIDs, keys are frontend IDs - existing_db_node_ids = set(node_id_map.values()) - # also include updated nodes (these are kept, should not be deleted) - for db_node_id, _ in nodes_to_update: - existing_db_node_ids.add(db_node_id) - - nodes_to_delete = [ - node.id for node_id_str, node in existing_node_map.items() if node.id not in existing_db_node_ids - ] - if nodes_to_delete: - await self.node_repo.delete_by_ids(graph_id, nodes_to_delete) - await self.edge_repo.delete_by_graph(graph_id) - - # create new nodes - for node_data in nodes_to_create: - node_id = node_data.get("id") - if not node_id: - continue - db_node_id_raw = node_id_map.get(node_id) - if not db_node_id_raw: - continue - new_db_node_id: uuid.UUID = db_node_id_raw - - position = node_data.get("position", {}) - position_absolute = node_data.get("positionAbsolute", position) - data_payload = node_data.get("data", {}) or {} - data_for_save, headers_to_store = prepare_node_data_for_save(data_payload) - if headers_to_store: - try: - secret_id = await store_a2a_auth_headers(self.db, graph_id, new_db_node_id, headers_to_store) - if "config" not in data_for_save: - data_for_save["config"] = {} - data_for_save["config"]["a2a_auth_headers"] = {"__secretRef": str(secret_id)} - except Exception as e: - logger.warning(f"[GraphService] Failed to store a2a_auth_headers for node {new_db_node_id}: {e}") - node_type = data_for_save.get("type") or node_data.get("type") or "agent" - - node_create_data = { - "graph_id": graph_id, - "id": new_db_node_id, - "type": node_type, - "position_x": float(position.get("x", 0)), - "position_y": float(position.get("y", 0)), - "position_absolute_x": float(position_absolute.get("x", position.get("x", 0))), - "position_absolute_y": float(position_absolute.get("y", position.get("y", 0))), - "width": float(node_data.get("width", 0)), - "height": float(node_data.get("height", 0)), - "data": data_for_save, - } - - await self.node_repo.create(node_create_data) - - # update existing nodes - for db_node_id, node_data in nodes_to_update: - position = node_data.get("position", {}) - position_absolute = node_data.get("positionAbsolute", position) - data_payload = node_data.get("data", {}) or {} - data_for_save, headers_to_store = prepare_node_data_for_save(data_payload) - if headers_to_store: - try: - from sqlalchemy import delete - - await self.db.execute( - delete(GraphNodeSecret).where( - GraphNodeSecret.graph_id == graph_id, - GraphNodeSecret.node_id == db_node_id, - GraphNodeSecret.key_slug == "a2a_auth_headers", - ) - ) - secret_id = await store_a2a_auth_headers(self.db, graph_id, db_node_id, headers_to_store) - if "config" not in data_for_save: - data_for_save["config"] = {} - data_for_save["config"]["a2a_auth_headers"] = {"__secretRef": str(secret_id)} - except Exception as e: - logger.warning(f"[GraphService] Failed to store a2a_auth_headers for node {db_node_id}: {e}") - node_type = data_for_save.get("type") or node_data.get("type") or "agent" - - update_data = { - "type": node_type, - "position_x": float(position.get("x", 0)), - "position_y": float(position.get("y", 0)), - "position_absolute_x": float(position_absolute.get("x", 
position.get("x", 0))), - "position_absolute_y": float(position_absolute.get("y", position.get("y", 0))), - "width": float(node_data.get("width", 0)), - "height": float(node_data.get("height", 0)), - "data": data_for_save, - } - - await self.node_repo.update(db_node_id, update_data) - - # save edges (with dedup) - saved_edges_count = 0 - skipped_edges_count = 0 - seen_edges: set[tuple[str, str]] = set() # for dedup - - for edge_data in edges: - source_id = edge_data.get("source") - target_id = edge_data.get("target") - - if not source_id or not target_id: - skipped_edges_count += 1 - continue - - # edge dedup: only save each source-target pair once - edge_key = (source_id, target_id) - if edge_key in seen_edges: - skipped_edges_count += 1 - continue - seen_edges.add(edge_key) - - # find the corresponding database node ID - source_node_id = node_id_map.get(source_id) - target_node_id = node_id_map.get(target_id) - - if not source_node_id or not target_node_id: - skipped_edges_count += 1 - continue - - # extract edge data fields (including edge_type, route_key, source_handle_id, etc.) - edge_data_payload = edge_data.get("data", {}) or {} - - edge_create_data = { - "graph_id": graph_id, - "source_node_id": source_node_id, - "target_node_id": target_node_id, - "data": edge_data_payload, # save edge metadata (edge_type, route_key, etc.) - } - - await self.edge_repo.create(edge_create_data) - saved_edges_count += 1 - - # update graph variables (save viewport and context variables metadata) and updated_at - update_data = {} - graph_variables = graph.variables or {} - - if viewport: - graph_variables["viewport"] = viewport - - # if variables provided, merge into graph_variables - if variables: - # merge variables, preserving existing viewport and other fields - for key, value in variables.items(): - graph_variables[key] = value - - if viewport or variables: - update_data["variables"] = graph_variables - - # update graph updated_at (ensure list sorting is correct) - # BaseModel uses the updated_at field; SQLAlchemy's onupdate auto-updates it - # but to be safe, we explicitly trigger an update - from app.utils.datetime import utc_now - - update_data["updated_at"] = utc_now() - - if update_data: - await self.graph_repo.update(graph_id, update_data) - - _invalidate_compile_cache(graph_id) - - return { - "graph_id": str(graph_id), - "nodes_count": len(nodes), - "edges_count": len(edges), - } - - async def load_graph_state( - self, - graph_id: uuid.UUID, - current_user: Optional[AuthUser] = None, - ) -> Dict[str, Any]: - """ - Load the complete graph state (nodes and edges). - - Return the format expected by the frontend: - { - "nodes": [...], - "edges": [...], - "viewport": {...}, - ... 
- } - """ - # get the graph - graph = await self.graph_repo.get(graph_id, relations=["nodes", "edges"]) - if not graph: - raise NotFoundException("Graph not found") - - # permission check - if current_user: - await self._ensure_access(graph, current_user, WorkspaceMemberRole.viewer) - - # load nodes and edges - nodes = await self.node_repo.list_by_graph(graph_id) - edges = await self.edge_repo.list_by_graph(graph_id) - - # build node mapping (database UUID -> frontend ID) - node_id_map: Dict[uuid.UUID, str] = {} - frontend_nodes = [] - - for node in nodes: - # generate frontend ID (use node ID string form) - frontend_id = str(node.id) - node_id_map[node.id] = frontend_id - - # build frontend node format - # note: ReactFlow's type field should be "custom" (all nodes use the BuilderNode component) - # the actual node type (e.g. "agent", "condition") is stored in data.type - node_data = node.data or {} - - # ensure data.type exists (used to get colors etc. from nodeRegistry) - # if node.data has no type, use the database node.type field - if "type" not in node_data: - node_data["type"] = node.type - - # restore position info: use saved position and positionAbsolute - # if position_absolute_x/y don't exist (old data), fall back to position_x/y - pos_x = float(node.position_x) - pos_y = float(node.position_y) - pos_abs_x = float(node.position_absolute_x) if node.position_absolute_x is not None else pos_x - pos_abs_y = float(node.position_absolute_y) if node.position_absolute_y is not None else pos_y - - frontend_node: Dict[str, Any] = { - "id": frontend_id, - "type": "custom", # ReactFlow node type, all nodes use BuilderNode - "position": { - "x": pos_x, - "y": pos_y, - }, - "positionAbsolute": { - "x": pos_abs_x, - "y": pos_abs_y, - }, - "width": float(node.width), - "height": float(node.height), - "data": node_data, - "selected": False, - "dragging": False, - } - - # ensure config field exists - node_data_dict = frontend_node["data"] if isinstance(frontend_node["data"], dict) else {} - if "config" not in node_data_dict: - node_data_dict["config"] = {} - # redact: if plaintext a2a_auth_headers remain, do not return to frontend - a2a_headers = node_data_dict.get("config", {}).get("a2a_auth_headers") - if isinstance(a2a_headers, dict) and "__secretRef" not in a2a_headers and a2a_headers: - node_data_dict.setdefault("config", {})["a2a_auth_headers"] = {"__redacted": True} - frontend_node["data"] = node_data_dict - - frontend_nodes.append(frontend_node) - - # build frontend edge format - frontend_edges = [] - for edge in edges: - source_id = node_id_map.get(edge.source_node_id) - target_id = node_id_map.get(edge.target_node_id) - - if not source_id or not target_id: - continue - - # restore edge data field from database - edge_data = edge.data or {} - edge_type = edge_data.get("edge_type", "normal") - - # set style and type based on edge_type - if edge_type == "loop_back": - edge_style = { - "stroke": "#9333ea", # purple, matches frontend LoopBackEdge - "strokeWidth": 2.5, - "strokeDasharray": "5,5", - } - edge_type_for_reactflow = "loop_back" - elif edge_type == "conditional": - edge_style = { - "stroke": "#3b82f6", # blue, matches frontend condition edge - "strokeWidth": 2, - } - edge_type_for_reactflow = "default" - else: - # normal or other types - edge_style = { - "stroke": "#cbd5e1", # matches frontend defaultEdgeOptions color - "strokeWidth": 1.5, - } - edge_type_for_reactflow = "default" - - # use default edge styles consistent with frontend (matches BuilderCanvas.tsx defaultEdgeOptions) - 
frontend_edge = { - "source": source_id, - "target": target_id, - "sourceHandle": None, - "targetHandle": None, - "type": edge_type_for_reactflow, # set ReactFlow edge type - "animated": True, - "style": edge_style, - "data": edge_data, # restore edge metadata (edge_type, route_key, source_handle_id, etc.) - "id": f"reactflow__edge-{source_id}-{target_id}", - } - frontend_edges.append(frontend_edge) - - # get viewport and variables - viewport = graph.variables.get("viewport", {}) if graph.variables else {} - variables = graph.variables or {} - - return { - "nodes": frontend_nodes, - "edges": frontend_edges, - "viewport": viewport, - "variables": variables, - } - - async def get_graph_detail( - self, - graph_id: uuid.UUID, - current_user: Optional[AuthUser] = None, - ) -> Dict[str, Any]: - """Get detailed graph information (including state).""" - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - if current_user: - await self._ensure_access(graph, current_user, WorkspaceMemberRole.viewer) - - # load state - state = await self.load_graph_state(graph_id, current_user) - - return { - "id": str(graph.id), - "name": graph.name, - "description": graph.description, - "workspaceId": str(graph.workspace_id) if graph.workspace_id else None, - "folderId": str(graph.folder_id) if graph.folder_id else None, - "parentId": str(graph.parent_id) if graph.parent_id else None, - "color": graph.color, - "isDeployed": graph.is_deployed, - "variables": graph.variables or {}, - "createdAt": graph.created_at.isoformat() if graph.created_at else None, - "updatedAt": graph.updated_at.isoformat() if graph.updated_at else None, - **state, - } - - async def create_default_deep_agents_graph( - self, - user_id: Optional[Any] = None, - file_emitter: Optional[Any] = None, - ) -> CompiledStateGraph: - """ - Build a default DeepAgents single-node graph in memory (no DB persistence). - Used for "default conversation" when graph_id is None in Chat API. - - Constructs one root node with useDeepAgents=true and no edges, then - uses GraphBuilder so that DeepAgentsGraphBuilder builds a standalone - create_deep_agent graph. - - Returns: - CompiledStateGraph: Same type as create_graph_by_graph_id, ready for ainvoke/astream_events. 
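A usage sketch for this default-graph path, assuming the standard LangGraph streaming interface; the input keys are illustrative:

    compiled = await graph_service.create_default_deep_agents_graph(user_id=current_user.id)
    async for event in compiled.astream_events(
        {"messages": [("user", "Hello")]},
        version="v2",
    ):
        ...  # forward streamed events to the client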
- """ - - start_time = time.time() - graph_id = uuid.uuid4() - node_id = uuid.uuid4() - - # In-memory graph (not added to session) - graph = AgentGraph( - id=graph_id, - name="Default Conversation", - user_id=str(user_id) if user_id is not None else "", - variables={}, - ) - - # Single root node with DeepAgents enabled - node = GraphNode( - id=node_id, - graph_id=graph_id, - type="agent", - data={ - "label": "Agent", - "config": {"useDeepAgents": True, "skills": ["*"]}, - }, - position_x=0, - position_y=0, - width=0, - height=0, - ) - - nodes: List[GraphNode] = [node] - edges: List[GraphEdge] = [] - - logger.info(f"[GraphService] ===== create_default_deep_agents_graph START ===== | user_id={user_id}") - - model_service = ModelService(self.db) - compiled_graph = await build_deep_agents_graph( - graph=graph, - nodes=nodes, - edges=edges, - user_id=user_id, - model_service=model_service, - file_emitter=file_emitter, - ) - - elapsed_ms = (time.time() - start_time) * 1000 - logger.info( - f"[GraphService] ===== create_default_deep_agents_graph COMPLETE ===== | " - f"user_id={user_id} | elapsed={elapsed_ms:.2f}ms" - ) - return compiled_graph # type: ignore[no-any-return] - - async def create_skill_creator_graph( - self, - user_id: str, - workspace_id: Optional[uuid.UUID] = None, - ) -> AgentGraph: - """Create the persisted Skill Creator graph container. - - Frontend currently applies the `skill-creator` template from static assets after graph - creation. This helper keeps a stable backend entry point for callers and tests that - expect the Skill Creator graph to be a first-class graph type. - """ - return await self.create_graph( - name="Skill Creator", - user_id=user_id, - workspace_id=workspace_id, - description="A specialized agent for creating and editing Skills", - ) - - async def create_graph_by_graph_id( - self, - graph_id: uuid.UUID, - user_id: Optional[Any] = None, - current_user: Optional[AuthUser] = None, - file_emitter: Optional[Any] = None, - thread_id: Optional[str] = None, - ) -> CompiledStateGraph: - """ - Create a LangGraph StateGraph from a graph stored in the database. - - Fetches the graph, nodes, and edges from the database and builds - a compiled StateGraph where each node is an Agent. 
- - Args: - graph_id: The UUID of the graph to build - user_id: User ID for workspace isolation - current_user: Current authenticated user for permission checks - - Returns: - CompiledStateGraph: The compiled graph ready for execution - - Raises: - NotFoundException: If the graph is not found - ForbiddenException: If the user doesn't have access to the graph - """ - - start_time = time.time() - logger.info( - f"[GraphService] ===== create_graph_by_graph_id START ===== | graph_id={graph_id} | user_id={user_id}" - ) - - # Fetch the graph - logger.debug(f"[GraphService] Fetching graph from database | graph_id={graph_id}") - graph = await self.graph_repo.get(graph_id) - if not graph: - logger.error(f"[GraphService] Graph not found | graph_id={graph_id}") - raise NotFoundException(f"Graph with id {graph_id} not found") - - logger.info( - f"[GraphService] Graph found | name='{graph.name}' | " - f"is_deployed={graph.is_deployed} | workspace_id={graph.workspace_id}" - ) - - # Check access permissions if current_user is provided - if current_user: - logger.debug( - f"[GraphService] Checking access permissions | user_id={current_user.id} | graph_owner={graph.user_id}" - ) - await self._ensure_access(graph, current_user, WorkspaceMemberRole.viewer) - logger.debug("[GraphService] Access permission check passed") - - # Check in-memory compile cache (keyed by graph + runtime prompt context) - cache_key = _build_runtime_aware_compile_cache_key(graph, user_id=user_id, thread_id=thread_id) - now_ts = time.time() - if cache_key in _compile_cache: - cached_graph, cached_at = _compile_cache[cache_key] - if (now_ts - cached_at) < _COMPILE_CACHE_TTL: - logger.info(f"[GraphService] Using cached compiled graph | graph_id={graph_id}") - return cached_graph - _compile_cache.pop(cache_key, None) - - # Code mode: bypass GraphBuilder entirely - if (graph.variables or {}).get("graph_mode") == "code": - logger.info(f"[GraphService] Code mode detected | graph_id={graph_id}") - compiled_graph = await self._compile_code_graph(graph) - _compile_cache[cache_key] = (compiled_graph, time.time()) - return compiled_graph # type: ignore[no-any-return] - - # Load nodes and edges - logger.debug(f"[GraphService] Loading nodes and edges for graph_id={graph_id}") - nodes = await self.node_repo.list_by_graph(graph_id) - edges = await self.edge_repo.list_by_graph(graph_id) - await hydrate_nodes_a2a_secrets(self.db, nodes) - - logger.info(f"[GraphService] Loaded graph data | nodes_count={len(nodes)} | edges_count={len(edges)}") - - # Log node details - for idx, node in enumerate(nodes): - logger.debug(f"[GraphService] Node [{idx + 1}/{len(nodes)}] | id={node.id} | type={node.type}") - - # Build the graph - logger.info("[GraphService] Building DeepAgents graph...") - model_service = ModelService(self.db) - compiled_graph = await build_deep_agents_graph( - graph=graph, - nodes=nodes, - edges=edges, - user_id=user_id, - model_service=model_service, - file_emitter=file_emitter, - thread_id=thread_id, - ) - - elapsed_ms = (time.time() - start_time) * 1000 - logger.info( - f"[GraphService] ===== create_graph_by_graph_id COMPLETE ===== | user_id={user_id} | " - f"graph_id={graph_id} | graph_name='{graph.name}' | " - f"nodes={len(nodes)} | edges={len(edges)} | elapsed={elapsed_ms:.2f}ms" - ) - - _compile_cache[cache_key] = (compiled_graph, time.time()) - return compiled_graph # type: ignore[no-any-return] - - async def _compile_code_graph(self, graph): - """Compile a code-mode graph: exec user code → get StateGraph → compile.""" - from 
app.core.agent.checkpointer.checkpointer import get_checkpointer - from app.core.code_executor import execute_code - - code = (graph.variables or {}).get("code_content", "") - if not code.strip(): - raise ValueError(f"Code graph {graph.id} has no code") - - state_graph = execute_code(code) - compiled = state_graph.compile(checkpointer=get_checkpointer()) - return compiled diff --git a/backend/app/services/mcp_client_service.py b/backend/app/services/mcp_client_service.py index 6e826b876..6f9b9e542 100644 --- a/backend/app/services/mcp_client_service.py +++ b/backend/app/services/mcp_client_service.py @@ -13,6 +13,7 @@ from loguru import logger +from app.common.app_errors import InternalServiceError, InvalidRequestError from app.core.tools.tool import EnhancedTool from app.models.mcp import McpServer @@ -168,7 +169,11 @@ async def _fetch_tools( # Get tool definitions (MCPTool objects) if not toolkit.session: - raise RuntimeError(f"Toolkit session not initialized for server: {server.name}") + raise InternalServiceError( + "Toolkit session is not initialized", + code="MCP_TOOLKIT_SESSION_MISSING", + data={"server_name": server.name}, + ) available_tools = await toolkit.session.list_tools() # type: ignore mcp_tool_definitions = available_tools.tools @@ -197,7 +202,11 @@ def config_from_server(server: McpServer) -> McpConnectionConfig: McpConnectionConfig """ if not server.url: - raise ValueError("Server URL is required") + raise InvalidRequestError( + "Server URL is required", + code="MCP_SERVER_URL_REQUIRED", + data={"server_id": str(server.id) if getattr(server, "id", None) else None, "name": server.name}, + ) return McpConnectionConfig( url=server.url, transport=server.transport or "streamable-http", diff --git a/backend/app/services/mcp_server_service.py b/backend/app/services/mcp_server_service.py index c9a3679cf..38b223d5a 100644 --- a/backend/app/services/mcp_server_service.py +++ b/backend/app/services/mcp_server_service.py @@ -13,7 +13,7 @@ from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import BadRequestException, NotFoundException +from app.common.app_errors import InternalServiceError, InvalidRequestError, NotFoundError from app.models.enums import McpConnectionStatus from app.models.mcp import McpServer from app.repositories.mcp_server import McpServerRepository @@ -61,7 +61,11 @@ async def create( existing = await self.repo.get_by_name(user_id, data.name) if existing: logger.warning(f"[McpServerService] Duplicate name: {data.name} for user {user_id}") - raise BadRequestException(f"MCP server with name '{data.name}' already exists") + raise InvalidRequestError( + f"MCP server with name '{data.name}' already exists", + code="MCP_SERVER_NAME_ALREADY_EXISTS", + data={"name": data.name}, + ) server = await self.repo.create( { @@ -106,7 +110,11 @@ async def update( if data.name and data.name != server.name: existing = await self.repo.get_by_name(user_id, data.name) if existing: - raise BadRequestException(f"MCP server with name '{data.name}' already exists") + raise InvalidRequestError( + f"MCP server with name '{data.name}' already exists", + code="MCP_SERVER_NAME_ALREADY_EXISTS", + data={"name": data.name}, + ) # Build update dict update_data = {} @@ -120,7 +128,11 @@ async def update( updated_server = await self.repo.update(server_id, update_data) if updated_server is None: - raise ValueError(f"MCP server {server_id} not found") + raise InternalServiceError( + "MCP server update failed because the server record disappeared", + 
code="MCP_SERVER_UPDATE_TARGET_MISSING", + data={"server_id": str(server_id)}, + ) await self.commit() logger.info(f"Updated MCP server: {updated_server.name}") return updated_server @@ -235,7 +247,11 @@ async def toggle_enabled( updated_server = await self.repo.toggle_enabled(server_id, enabled) if updated_server is None: - raise ValueError(f"MCP server {server_id} not found") + raise InternalServiceError( + "MCP server enable state update failed because the server record disappeared", + code="MCP_SERVER_TOGGLE_TARGET_MISSING", + data={"server_id": str(server_id), "enabled": enabled}, + ) await self.commit() logger.info(f"MCP server {updated_server.name} {'enabled' if enabled else 'disabled'}") return updated_server @@ -298,15 +314,17 @@ async def get_with_permission( MCP server Raises: - NotFoundException: server does not exist or no permission + NotFoundError: server does not exist or no permission """ server = await self.repo.get(server_id) if not server or server.deleted_at: - raise NotFoundException("MCP server not found") + raise NotFoundError("MCP server not found", code="MCP_SERVER_NOT_FOUND", data={"server_id": str(server_id)}) if server.user_id != user_id: - raise NotFoundException("MCP server not found") # Security: don't reveal existence + raise NotFoundError( + "MCP server not found", code="MCP_SERVER_NOT_FOUND", data={"server_id": str(server_id)} + ) # Security: don't reveal existence return server diff --git a/backend/app/services/memory_service.py b/backend/app/services/memory_service.py index 2e425f2ca..f9645572b 100644 --- a/backend/app/services/memory_service.py +++ b/backend/app/services/memory_service.py @@ -26,6 +26,7 @@ from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError from app.core.database import AsyncSessionLocal, engine from app.models.memory import Memory from app.schemas.memory import UserMemory @@ -74,7 +75,11 @@ def __init__(self, db: Optional[AsyncSession] = None): async def _get_table(self, table_type: str = "memories") -> sa.Table: # Currently only supports the 'memories' table if table_type != "memories": - raise ValueError(f"Unsupported table_type: {table_type}") + raise InvalidRequestError( + "Unsupported memory table type", + code="MEMORY_TABLE_TYPE_UNSUPPORTED", + data={"table_type": table_type}, + ) table: sa.Table = Memory.__table__ # type: ignore[assignment] return table @@ -284,16 +289,8 @@ async def get_user_memories( dialect = engine.dialect.name if dialect == "postgresql": # PostgreSQL: Use JSONB @> operator to check if array contains the topic - # Cast JSON to JSONB for proper containment check for topic in topics: - # Check if topics JSON array contains the topic string - # Using JSONB @> operator: topics::jsonb @> '["topic"]'::jsonb - # Escape single quotes in JSON string for SQL safety - topic_array_json = json.dumps([topic]) - # Replace single quotes with escaped single quotes for SQL - topic_array_json_escaped = topic_array_json.replace("'", "''") - # Use text() with string formatting (safe for JSON strings from json.dumps) - stmt = stmt.where(text(f"topics::jsonb @> '{topic_array_json_escaped}'::jsonb")) + stmt = stmt.where(text("topics::jsonb @> :val::jsonb").bindparams(val=json.dumps([topic]))) else: # SQLite or other: Use LIKE for compatibility for topic in topics: diff --git a/backend/app/services/model_credential_service.py b/backend/app/services/model_credential_service.py index 1924c9443..b9f850361 100644 --- 
a/backend/app/services/model_credential_service.py +++ b/backend/app/services/model_credential_service.py @@ -10,7 +10,7 @@ from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import NotFoundException +from app.common.app_errors import InvalidRequestError, NotFoundError from app.core.model import validate_provider_credentials from app.core.model.utils import decrypt_credentials, encrypt_credentials from app.repositories.model_credential import ModelCredentialRepository @@ -95,7 +95,11 @@ async def upsert_credential( """ provider = await self.provider_repo.get_by_name(provider_name) if not provider: - raise NotFoundException(f"Provider not found: {provider_name}") + raise NotFoundError( + "Provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"provider_name": provider_name}, + ) is_valid = False validation_error = None @@ -131,9 +135,17 @@ async def validate_credential(self, credential_id: uuid.UUID) -> Dict[str, Any]: """Re-validate an existing credential. Look up by ID, decrypt, call API to validate.""" credential = await self.repo.get(credential_id, relations=["provider"]) if not credential: - raise NotFoundException("Credential not found") + raise NotFoundError( + "Credential not found", + code="MODEL_CREDENTIAL_NOT_FOUND", + data={"credential_id": str(credential_id)}, + ) if not credential.provider: - raise NotFoundException("Credential's associated provider not found") + raise NotFoundError( + "Credential's associated provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"credential_id": str(credential_id)}, + ) decrypted = decrypt_credentials(credential.credentials) is_valid, error = await self._validate_for_provider(credential.provider, decrypted, credential.provider_id) @@ -153,7 +165,11 @@ async def get_credential(self, credential_id: uuid.UUID, include_credentials: bo """Get credential details.""" credential = await self.repo.get(credential_id, relations=["provider"]) if not credential: - raise NotFoundException("Credential not found") + raise NotFoundError( + "Credential not found", + code="MODEL_CREDENTIAL_NOT_FOUND", + data={"credential_id": str(credential_id)}, + ) pname = credential.provider.name if credential.provider else "" pdisplay = credential.provider.display_name if credential.provider else "" @@ -186,19 +202,23 @@ async def list_credentials(self) -> List[Dict[str, Any]]: async def delete_credential(self, credential_id: uuid.UUID) -> None: """Delete a built-in provider's credential. Custom provider credentials cannot be deleted separately.""" - from app.common.exceptions import BadRequestException - credential = await self.repo.get(credential_id, relations=["provider"]) if not credential: - raise NotFoundException("Credential not found") + raise NotFoundError( + "Credential not found", + code="MODEL_CREDENTIAL_NOT_FOUND", + data={"credential_id": str(credential_id)}, + ) if ( credential.provider and credential.provider.provider_type == "custom" and not credential.provider.is_template ): - raise BadRequestException( - f"Cannot delete credentials for custom provider separately. Use DELETE /model-providers/{credential.provider.name} to remove the entire provider." + raise InvalidRequestError( + f"Cannot delete credentials for custom provider separately. 
Use DELETE /model-providers/{credential.provider.name} to remove the entire provider.", + code="MODEL_CREDENTIAL_CUSTOM_DELETE_FORBIDDEN", + data={"provider_name": credential.provider.name}, ) await self.repo.delete(credential_id) diff --git a/backend/app/services/model_provider_service.py b/backend/app/services/model_provider_service.py index cdc8d2ef7..ba220149c 100644 --- a/backend/app/services/model_provider_service.py +++ b/backend/app/services/model_provider_service.py @@ -6,7 +6,7 @@ from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import BadRequestException, NotFoundException +from app.common.app_errors import InvalidRequestError, NotFoundError from app.core.model import get_factory from app.repositories.model_credential import ModelCredentialRepository from app.repositories.model_instance import ModelInstanceRepository @@ -174,18 +174,26 @@ async def update_provider_defaults(self, provider_name: str, default_parameters: Updated provider info Raises: - NotFoundException: provider does not exist + NotFoundError: provider does not exist """ db_provider = await self.repo.get_by_name(provider_name) if not db_provider: - raise NotFoundException(f"Provider not found: {provider_name}") + raise NotFoundError( + "Provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"provider_name": provider_name}, + ) await self.repo.update_default_parameters(provider_name, default_parameters) await self.commit() result = await self.get_provider(provider_name) if not result: - raise NotFoundException(f"Provider not found: {provider_name}") + raise NotFoundError( + "Provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"provider_name": provider_name}, + ) return result async def _create_derived_provider(self, template: Any, name: str, display_name: str, template_name: str) -> Any: @@ -221,7 +229,11 @@ async def add_custom_provider( template = self.factory.get_provider("custom") if not template: - raise NotFoundException("Provider not found: custom") + raise NotFoundError( + "Provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"provider_name": "custom"}, + ) is_valid = False validation_error = None @@ -284,11 +296,23 @@ async def delete_provider(self, provider_name: str) -> None: if not provider: factory_provider = self.factory.get_provider(provider_name) if factory_provider: - raise BadRequestException(f"Built-in provider cannot be deleted: {provider_name}") - raise NotFoundException(f"Provider not found: {provider_name}") + raise InvalidRequestError( + f"Built-in provider cannot be deleted: {provider_name}", + code="MODEL_PROVIDER_BUILTIN_DELETE_FORBIDDEN", + data={"provider_name": provider_name}, + ) + raise NotFoundError( + "Provider not found", + code="MODEL_PROVIDER_NOT_FOUND", + data={"provider_name": provider_name}, + ) if provider.provider_type != "custom": - raise BadRequestException(f"Only custom providers can be deleted: {provider_name}") + raise InvalidRequestError( + f"Only custom providers can be deleted: {provider_name}", + code="MODEL_PROVIDER_DELETE_FORBIDDEN", + data={"provider_name": provider_name}, + ) await self.repo.delete(provider.id) logger.info(f"Deleted custom provider: {provider_name}") diff --git a/backend/app/services/model_service.py b/backend/app/services/model_service.py index ecd9544b2..efd3c4b57 100644 --- a/backend/app/services/model_service.py +++ b/backend/app/services/model_service.py @@ -9,7 +9,8 @@ from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import BadRequestException, 
ModelConfigError, NotFoundException +from app.common.app_errors import InvalidRequestError, ModelConfigError, NotFoundError +from app.common.stream_errors import stream_error_event from app.core.model import ModelType, create_model_instance from app.core.model.factory import get_factory from app.models.enums import ModelUsageSource @@ -261,7 +262,11 @@ async def update_model_instance( """Update model instance parameters.""" instance = await self.repo.get(instance_id) if not instance: - raise NotFoundException(f"Model instance not found: {instance_id}") + raise NotFoundError( + "Model instance not found", + code="MODEL_INSTANCE_NOT_FOUND", + data={"instance_id": str(instance_id)}, + ) updates: Dict[str, Any] = {} if model_parameters is not None: @@ -324,7 +329,10 @@ async def get_model_instance( implementation_name: Optional[str] = None model_parameters: Dict[str, Any] = {} if not provider_name or not model_name: - raise BadRequestException("provider_name and model_name are required") + raise InvalidRequestError( + "provider_name and model_name are required", + code="MODEL_PROVIDER_OR_NAME_REQUIRED", + ) provider = await self.provider_repo.get_by_name(provider_name) if not provider: @@ -524,12 +532,13 @@ async def test_output_stream( """ instance = await self.repo.get_by_name(model_name) if not instance: - err_data = { - "error_code": MODEL_NOT_FOUND, - "message": f'Model "{model_name}" is not registered.', - "params": {"model": model_name or ""}, - } - yield f"event: error\ndata: {json.dumps(err_data)}\n\n" + yield stream_error_event( + code=MODEL_NOT_FOUND, + message=f'Model "{model_name}" is not registered.', + data={"model": model_name or ""}, + source="api", + user_action="configure_model", + ) return provider_name = instance.resolved_provider_name @@ -539,12 +548,13 @@ async def test_output_stream( credentials = await self.credential_service.get_decrypted_credentials(provider_name) if not credentials: - err_data = { - "error_code": MODEL_NO_CREDENTIALS, - "message": f'No valid API key for provider "{provider_name}".', - "params": {"model": model_name or "", "provider": provider_name or ""}, - } - yield f"event: error\ndata: {json.dumps(err_data)}\n\n" + yield stream_error_event( + code=MODEL_NO_CREDENTIALS, + message=f'No valid API key for provider "{provider_name}".', + data={"model": model_name or "", "provider": provider_name or ""}, + source="api", + user_action="configure_model", + ) return effective_params = {**(instance.model_parameters or {})} @@ -560,7 +570,12 @@ async def test_output_stream( effective_params, ) except Exception as e: - yield f"event: error\ndata: {json.dumps({'error': f'Failed to create model instance: {str(e)}'})}\n\n" + yield stream_error_event( + code="MODEL_INSTANCE_CREATE_FAILED", + message="Failed to create model instance.", + data={"detail": str(e)}, + source="runtime", + ) return start_time = time.monotonic() @@ -617,4 +632,11 @@ async def test_output_stream( user_id=user_id, source=ModelUsageSource.PLAYGROUND, ) - yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n" + yield stream_error_event( + code="MODEL_STREAM_ERROR", + message="Model streaming failed.", + data={"detail": str(e)}, + source="runtime", + retryable=True, + user_action="retry", + ) diff --git a/backend/app/services/oauth_service.py b/backend/app/services/oauth_service.py index ce987d8dd..aa5c007a7 100644 --- a/backend/app/services/oauth_service.py +++ b/backend/app/services/oauth_service.py @@ -20,7 +20,7 @@ from sqlalchemy import select from sqlalchemy.ext.asyncio 
import AsyncSession -from app.common.exceptions import BadRequestException, UnauthorizedException +from app.common.app_errors import AuthenticationError, InvalidRequestError from app.core.oauth import get_oauth_config from app.core.redis import RedisClient from app.models.auth import AuthUser @@ -42,6 +42,61 @@ def __init__(self, db: AsyncSession): self.user_repo = AuthUserRepository(db) self.oauth_config = get_oauth_config() + def _provider_not_found(self, provider_name: str) -> InvalidRequestError: + return InvalidRequestError( + f"OAuth provider '{provider_name}' not found", + code="OAUTH_PROVIDER_NOT_FOUND", + data={"provider_name": provider_name}, + ) + + def _endpoint_discovery_failed(self, provider_name: str, endpoint_type: str) -> InvalidRequestError: + code_map = { + "authorization": "OAUTH_DISCOVERY_FAILED", + "token": "OAUTH_TOKEN_ENDPOINT_DISCOVERY_FAILED", + "userinfo": "OAUTH_USERINFO_ENDPOINT_DISCOVERY_FAILED", + } + message_map = { + "authorization": f"Failed to discover OAuth authorization endpoint for {provider_name}", + "token": f"Failed to discover OAuth token endpoint for {provider_name}", + "userinfo": f"Failed to discover OAuth userinfo endpoint for {provider_name}", + } + return InvalidRequestError( + message_map[endpoint_type], + code=code_map[endpoint_type], + data={"provider_name": provider_name}, + ) + + def _missing_endpoint(self, provider_name: str, endpoint_type: str) -> InvalidRequestError: + code_map = { + "authorization": "OAUTH_AUTHORIZE_URL_MISSING", + "token": "OAUTH_TOKEN_URL_MISSING", + "userinfo": "OAUTH_USERINFO_URL_MISSING", + } + message_map = { + "authorization": f"No authorization URL configured for {provider_name}", + "token": f"No token URL configured for {provider_name}", + "userinfo": f"No userinfo URL configured for {provider_name}", + } + return InvalidRequestError( + message_map[endpoint_type], + code=code_map[endpoint_type], + data={"provider_name": provider_name}, + ) + + def _token_exchange_failed(self, provider_name: str) -> InvalidRequestError: + return InvalidRequestError( + f"Failed to exchange OAuth code for tokens for {provider_name}", + code="OAUTH_TOKEN_EXCHANGE_FAILED", + data={"provider_name": provider_name}, + ) + + def _userinfo_fetch_failed(self, provider_name: str) -> InvalidRequestError: + return InvalidRequestError( + f"Failed to fetch OAuth user info for {provider_name}", + code="OAUTH_USERINFO_FETCH_FAILED", + data={"provider_name": provider_name}, + ) + # ==================== Authorization Flow ==================== async def generate_authorization_url( @@ -62,11 +117,11 @@ async def generate_authorization_url( Tuple of (authorization_url, state) Raises: - BadRequestException: Provider not found or disabled + InvalidRequestError: Provider not found or disabled """ provider = self.oauth_config.get_provider(provider_name) if not provider: - raise BadRequestException(f"OAuth provider '{provider_name}' not found or not enabled") + raise self._provider_not_found(provider_name) # Generate or reuse state if not state: @@ -96,10 +151,10 @@ async def generate_authorization_url( authorize_url = cast(Optional[str], oidc_config.get("authorization_endpoint")) except Exception as e: logger.error(f"{LOG_PREFIX} OIDC Discovery failed: {e}") - raise BadRequestException(f"Failed to discover OAuth endpoints for {provider_name}") + raise self._endpoint_discovery_failed(provider_name, "authorization") if not authorize_url: - raise BadRequestException(f"No authorization URL configured for {provider_name}") + raise 
self._missing_endpoint(provider_name, "authorization") # Build authorization URL params params = { @@ -167,7 +222,7 @@ async def exchange_code_for_tokens( """ provider = self.oauth_config.get_provider(provider_name) if not provider: - raise BadRequestException(f"OAuth provider '{provider_name}' not found") + raise self._provider_not_found(provider_name) # Get token URL token_url: Optional[str] = provider.token_url or None @@ -177,10 +232,10 @@ async def exchange_code_for_tokens( token_url = cast(Optional[str], oidc_config.get("token_endpoint")) except Exception as e: logger.error(f"{LOG_PREFIX} OIDC Discovery failed: {e}") - raise BadRequestException(f"Failed to discover token endpoint for {provider_name}") + raise self._endpoint_discovery_failed(provider_name, "token") if not token_url: - raise BadRequestException(f"No token URL configured for {provider_name}") + raise self._missing_endpoint(provider_name, "token") # Build request data = { @@ -228,10 +283,10 @@ async def exchange_code_for_tokens( except httpx.HTTPStatusError as e: logger.error(f"{LOG_PREFIX} Token exchange failed: {e.response.text}") - raise BadRequestException("Failed to exchange code for tokens") + raise self._token_exchange_failed(provider_name) except Exception as e: logger.error(f"{LOG_PREFIX} Token exchange error: {e}") - raise BadRequestException("Token exchange failed") + raise self._token_exchange_failed(provider_name) # ==================== User Info ==================== @@ -252,7 +307,7 @@ async def fetch_userinfo( """ provider = self.oauth_config.get_provider(provider_name) if not provider: - raise BadRequestException(f"OAuth provider '{provider_name}' not found") + raise self._provider_not_found(provider_name) # Get userinfo URL userinfo_url = provider.userinfo_url @@ -262,9 +317,10 @@ async def fetch_userinfo( userinfo_url = oidc_config.get("userinfo_endpoint") except Exception as e: logger.error(f"{LOG_PREFIX} OIDC Discovery failed: {e}") + raise self._endpoint_discovery_failed(provider_name, "userinfo") if not userinfo_url: - raise BadRequestException(f"No userinfo URL configured for {provider_name}") + raise self._missing_endpoint(provider_name, "userinfo") headers = { "Authorization": f"Bearer {access_token}", @@ -288,10 +344,10 @@ async def fetch_userinfo( except httpx.HTTPStatusError as e: logger.error(f"{LOG_PREFIX} Failed to fetch userinfo: {e.response.text}") - raise BadRequestException("Failed to fetch user info") + raise self._userinfo_fetch_failed(provider_name) except Exception as e: logger.error(f"{LOG_PREFIX} Userinfo fetch error: {e}") - raise BadRequestException("Failed to fetch user info") + raise self._userinfo_fetch_failed(provider_name) async def _fetch_github_email(self, access_token: str) -> Optional[str]: """Get GitHub primary email.""" @@ -341,7 +397,7 @@ def parse_userinfo( """ provider = self.oauth_config.get_provider(provider_name) if not provider: - raise BadRequestException(f"OAuth provider '{provider_name}' not found") + raise self._provider_not_found(provider_name) mapping = provider.user_mapping @@ -383,7 +439,7 @@ async def find_or_create_user( Tuple of (user, is_new_user) Raises: - UnauthorizedException: User missing and registration disabled + AuthenticationError: User missing and registration disabled """ oauth_settings = self.oauth_config.settings @@ -419,11 +475,15 @@ async def find_or_create_user( # 3) Create new user if not oauth_settings.allow_registration: - raise UnauthorizedException("Registration via OAuth is not allowed. 
Please sign up first.") + raise AuthenticationError( + "Registration via OAuth is not allowed. Please sign up first.", code="OAUTH_REGISTRATION_DISABLED" + ) if not email: - raise BadRequestException( - f"Email is required for registration. Please ensure your {provider_name} account has a verified email." + raise InvalidRequestError( + f"Email is required for registration. Please ensure your {provider_name} account has a verified email.", + code="OAUTH_EMAIL_REQUIRED", + data={"provider_name": provider_name}, ) # Create new user @@ -551,12 +611,12 @@ async def unlink_oauth_account( Whether unlink succeeded Raises: - BadRequestException: User would be unable to sign in + InvalidRequestError: User would be unable to sign in """ # Ensure user can still sign in after unlink user = await self.user_repo.get_by_id(user_id) if not user: - raise BadRequestException("User not found") + raise InvalidRequestError("User not found", code="USER_NOT_FOUND", data={"user_id": user_id}) # Get all user OAuth bindings oauth_accounts = await self.get_user_oauth_accounts(user_id) @@ -570,7 +630,10 @@ async def unlink_oauth_account( # Disallow unlink when no password and only one OAuth binding if not user.hashed_password and len(oauth_accounts) == 1: - raise BadRequestException("Cannot unlink the only OAuth account. Please set a password first.") + raise InvalidRequestError( + "Cannot unlink the only OAuth account. Please set a password first.", + code="OAUTH_LAST_ACCOUNT_UNLINK_FORBIDDEN", + ) await self._delete_oauth_account(target_account) logger.info(f"{LOG_PREFIX} Unlinked OAuth account: {provider_name} from user {user_id}") diff --git a/backend/app/services/openapi_graph_service.py b/backend/app/services/openapi_graph_service.py deleted file mode 100644 index da2d1df39..000000000 --- a/backend/app/services/openapi_graph_service.py +++ /dev/null @@ -1,280 +0,0 @@ -""" -OpenAPI Graph Service — core business logic. - -Responsibilities: -- Start graph background execution (run) -- Query execution status (status) -- Abort execution (abort) -- Get execution result (result) -""" - -from __future__ import annotations - -import asyncio -import uuid -from datetime import datetime, timezone -from typing import Any, Dict, Optional - -from langchain.messages import AIMessage, HumanMessage -from loguru import logger -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.exceptions import BadRequestException, NotFoundException -from app.core.database import AsyncSessionLocal -from app.models.graph_execution import ExecutionStatus, GraphExecution -from app.repositories.graph import GraphRepository -from app.repositories.graph_execution import GraphExecutionRepository -from app.services.graph_service import GraphService -from app.utils.task_manager import task_manager - -from .base import BaseService - - -class OpenApiGraphService(BaseService): - """OpenAPI graph execution service.""" - - def __init__(self, db: AsyncSession): - super().__init__(db) - self.exec_repo = GraphExecutionRepository(db) - self.graph_repo = GraphRepository(db) - - async def run_graph( - self, - *, - graph_id: uuid.UUID, - user_id: str, - variables: Optional[Dict[str, Any]] = None, - ) -> Dict[str, Any]: - """ - Start graph execution (background async). 
- - Returns: - {"executionId": str, "status": str} - """ - # verify graph exists - graph = await self.graph_repo.get(graph_id) - if not graph: - raise NotFoundException("Graph not found") - - # create execution record - execution = GraphExecution( - graph_id=graph_id, - user_id=user_id, - status=ExecutionStatus.INIT, - input_variables=variables or {}, - ) - self.db.add(execution) - await self.db.commit() - await self.db.refresh(execution) - - execution_id = execution.id - logger.info(f"[OpenAPI] Graph execution created | execution_id={execution_id} graph_id={graph_id}") - - # start background execution task - asyncio.create_task( - self._execute_graph_background( - execution_id=execution_id, - graph_id=graph_id, - user_id=user_id, - variables=variables or {}, - ) - ) - - return { - "executionId": str(execution_id), - "status": ExecutionStatus.INIT.value, - } - - async def _execute_graph_background( - self, - execution_id: uuid.UUID, - graph_id: uuid.UUID, - user_id: str, - variables: Dict[str, Any], - ) -> None: - """Execute graph in background (using an independent DB session).""" - try: - async with AsyncSessionLocal() as db: - # update status to executing - exec_repo = GraphExecutionRepository(db) - execution = await exec_repo.get(execution_id) - if not execution: - return - - execution.status = ExecutionStatus.EXECUTING - execution.started_at = datetime.now(timezone.utc) - await db.commit() - - # compile graph - graph_service = GraphService(db) - compiled_graph = await graph_service.create_graph_by_graph_id( - graph_id=graph_id, - user_id=user_id, - ) - - # build input messages - # if variables contains a message field, use it as the user message - user_message = variables.pop("message", "") - if not user_message: - user_message = variables.pop("query", "Execute task") - - initial_context = {} - # put remaining variables into context - for key, value in variables.items(): - initial_context[key] = value - - # also load graph.variables.context - graph_model = await GraphRepository(db).get(graph_id) - if graph_model and graph_model.variables: - context_vars = graph_model.variables.get("context", {}) - for key, value in context_vars.items(): - if key not in initial_context: # variables take precedence - if isinstance(value, dict) and "value" in value: - initial_context[key] = value["value"] - else: - initial_context[key] = value - - # configuration - thread_id = f"openapi_{execution_id}" - from langchain_core.runnables.config import RunnableConfig - - config: RunnableConfig = { - "configurable": {"thread_id": thread_id, "user_id": user_id}, - "recursion_limit": 150, - } - - # register with task_manager to support abort - invoke_task = asyncio.create_task( - compiled_graph.ainvoke( - {"messages": [HumanMessage(content=user_message)], "context": initial_context}, - config=config, - ) - ) - await task_manager.register_task(thread_id, invoke_task) - - try: - result = await invoke_task - except asyncio.CancelledError: - # aborted by user - execution = await exec_repo.get(execution_id) - if execution: - execution.status = ExecutionStatus.FAILED - execution.error_message = "Execution aborted by user" - execution.finished_at = datetime.now(timezone.utc) - await db.commit() - return - finally: - await task_manager.unregister_task(thread_id) - - # extract result - messages = result.get("messages", []) - last_ai_msg = next( - (m for m in reversed(messages) if isinstance(m, AIMessage)), - None, - ) - - output_data: Dict[str, Any] = {} - if last_ai_msg: - output_data["content"] = 
str(last_ai_msg.content) if last_ai_msg.content else "" - if hasattr(last_ai_msg, "tool_calls") and last_ai_msg.tool_calls: - output_data["tool_calls"] = [ - { - "name": tc.get("name"), - "args": tc.get("args"), - } - for tc in last_ai_msg.tool_calls - ] - - # update execution record - execution = await exec_repo.get(execution_id) - if execution: - execution.status = ExecutionStatus.FINISH - execution.output = output_data - execution.finished_at = datetime.now(timezone.utc) - await db.commit() - - logger.info(f"[OpenAPI] Graph execution completed | execution_id={execution_id}") - - except Exception as e: - logger.error(f"[OpenAPI] Graph execution failed | execution_id={execution_id} error={e}") - try: - async with AsyncSessionLocal() as db: - exec_repo = GraphExecutionRepository(db) - execution = await exec_repo.get(execution_id) - if execution: - execution.status = ExecutionStatus.FAILED - execution.error_message = str(e)[:2000] - execution.finished_at = datetime.now(timezone.utc) - await db.commit() - except Exception as inner_e: - logger.error(f"[OpenAPI] Failed to update execution status: {inner_e}") - - async def get_status( - self, - execution_id: uuid.UUID, - user_id: str, - ) -> Dict[str, Any]: - """Get execution status.""" - execution = await self.exec_repo.get_by_id_and_user(execution_id, user_id) - if not execution: - raise NotFoundException("Execution not found") - - return { - "executionId": str(execution.id), - "graphId": str(execution.graph_id), - "status": execution.status.value, - "startedAt": execution.started_at.isoformat() if execution.started_at else None, - "finishedAt": execution.finished_at.isoformat() if execution.finished_at else None, - "errorMessage": execution.error_message, - } - - async def abort_execution( - self, - execution_id: uuid.UUID, - user_id: str, - ) -> Dict[str, Any]: - """Abort execution.""" - execution = await self.exec_repo.get_by_id_and_user(execution_id, user_id) - if not execution: - raise NotFoundException("Execution not found") - - if execution.status != ExecutionStatus.EXECUTING: - raise BadRequestException(f"Cannot abort execution with status: {execution.status.value}") - - # stop the task via task_manager - thread_id = f"openapi_{execution_id}" - stopped = await task_manager.stop_task(thread_id) - if stopped: - await task_manager.cancel_task(thread_id) - - # update status - execution.status = ExecutionStatus.FAILED - execution.error_message = "Aborted by user" - execution.finished_at = datetime.now(timezone.utc) - await self.db.commit() - - return { - "executionId": str(execution.id), - "graphId": str(execution.graph_id), - "status": execution.status.value, - } - - async def get_result( - self, - execution_id: uuid.UUID, - user_id: str, - ) -> Dict[str, Any]: - """Get execution result.""" - execution = await self.exec_repo.get_by_id_and_user(execution_id, user_id) - if not execution: - raise NotFoundException("Execution not found") - - return { - "executionId": str(execution.id), - "graphId": str(execution.graph_id), - "status": execution.status.value, - "output": execution.output, - "errorMessage": execution.error_message, - "startedAt": execution.started_at.isoformat() if execution.started_at else None, - "finishedAt": execution.finished_at.isoformat() if execution.finished_at else None, - } diff --git a/backend/app/services/openclaw_instance_service.py b/backend/app/services/openclaw_instance_service.py index 978d1b16f..e3b5289af 100644 --- a/backend/app/services/openclaw_instance_service.py +++ 
b/backend/app/services/openclaw_instance_service.py @@ -23,13 +23,15 @@ from sqlalchemy import func, select, update from sqlalchemy.ext.asyncio import AsyncSession +from app.common.app_errors import InvalidRequestError, NotFoundError, ServiceUnavailableError, normalize_app_error from app.core.agent.backends.docker_check import get_docker_client +from app.core.settings import settings from app.models.enums import InstanceStatus from app.models.openclaw_instance import OpenClawInstance from app.services.base import BaseService -OPENCLAW_IMAGE = os.environ.get("OPENCLAW_IMAGE", "jdopensource/joysafeter-openclaw:latest") -OPENCLAW_NETWORK = os.environ.get("OPENCLAW_NETWORK", "joysafeter-network") +OPENCLAW_IMAGE = settings.openclaw_image +OPENCLAW_NETWORK = settings.openclaw_network PORT_RANGE_START = 19001 PORT_RANGE_END = 19999 GATEWAY_READY_TIMEOUT = 300 @@ -94,7 +96,11 @@ async def _allocate_port(self) -> int: for p in range(PORT_RANGE_START, PORT_RANGE_END + 1): if p not in used_ports: return p - raise RuntimeError("No available ports for OpenClaw instances") + raise ServiceUnavailableError( + "No available ports for OpenClaw instances", + code="OPENCLAW_PORT_POOL_EXHAUSTED", + data={"port_range_start": PORT_RANGE_START, "port_range_end": PORT_RANGE_END}, + ) return next_port async def ensure_instance_running(self, user_id: str) -> OpenClawInstance: @@ -139,10 +145,16 @@ async def ensure_instance_running(self, user_id: str) -> OpenClawInstance: await self.db.refresh(instance) return instance except Exception as e: - logger.error(f"Failed to start OpenClaw instance for user {user_id}: {e}") - await self._update_status(instance.id, InstanceStatus.FAILED, error_message=str(e)) + app_error = normalize_app_error( + e, + default_code="OPENCLAW_INSTANCE_START_FAILED", + default_message="Failed to start OpenClaw instance", + default_data={"user_id": user_id, "instance_id": str(instance.id)}, + ) + logger.error(f"Failed to start OpenClaw instance for user {user_id}: {app_error}") + await self._update_status(instance.id, InstanceStatus.FAILED, error_message=app_error.message) await self.db.refresh(instance) - raise RuntimeError(f"Failed to start OpenClaw instance: {e}") + raise app_error async def _create_container(self, instance: OpenClawInstance, recreate: bool = False) -> str: """Create and start a Docker container for the instance, or start an existing one.""" @@ -156,7 +168,11 @@ try: client = get_docker_client() except Exception as e: - raise RuntimeError(f"Failed to connect to Docker daemon: {e}") + raise ServiceUnavailableError( + "Failed to connect to Docker daemon", + code="DOCKER_DAEMON_UNAVAILABLE", + data={"detail": str(e)}, + ) # Try to find and start existing container if not recreate: @@ -220,17 +236,9 @@ } # Pass through AI provider keys from host environment - for key in ( - "ANTHROPIC_API_KEY", - "OPENAI_API_KEY", - "AI_GATEWAY_BASE_URL", - "AI_GATEWAY_API_KEY", - "AI_GATEWAY_PROVIDER", - "AI_GATEWAY_MODEL", - "ANTHROPIC_BASE_URL", - "ANTHROPIC_AUTH_TOKEN", - "ANTHROPIC_MODEL", - ): + from app.utils.credentials import PASSTHROUGH_ENV_KEYS + + for key in PASSTHROUGH_ENV_KEYS: val = os.environ.get(key) if val: env_vars[key] = val @@ -240,7 +248,16 @@ has_anthropic = all(k in env_vars for k in ("ANTHROPIC_BASE_URL", "ANTHROPIC_AUTH_TOKEN", 
"ANTHROPIC_MODEL")) if not (has_ai_gw or has_anthropic): - raise ValueError("Missing required environment variables for AI Gateway (need AI_GATEWAY_* or ANTHROPIC_*)") + raise InvalidRequestError( + "Missing required environment variables for AI Gateway", + code="OPENCLAW_GATEWAY_ENV_MISSING", + data={ + "required_any_of": [ + ["AI_GATEWAY_BASE_URL", "AI_GATEWAY_API_KEY", "AI_GATEWAY_MODEL"], + ["ANTHROPIC_BASE_URL", "ANTHROPIC_AUTH_TOKEN", "ANTHROPIC_MODEL"], + ] + }, + ) # Also pass config overrides if instance.config_json: @@ -276,7 +293,11 @@ async def _create_container(self, instance: OpenClawInstance, recreate: bool = F restart_policy={"Name": "unless-stopped"}, ) except Exception as e: - raise RuntimeError(f"docker run failed: {str(e)}") + raise ServiceUnavailableError( + "Failed to create OpenClaw container", + code="OPENCLAW_CONTAINER_CREATE_FAILED", + data={"container_name": container_name, "detail": str(e)}, + ) container_id = str(container.id)[:12] logger.info( @@ -309,13 +330,25 @@ async def _wait_for_gateway(self, instance: OpenClawInstance) -> None: if container.status != "running": logs = await asyncio.to_thread(container.logs, tail=30) logs_str = logs.decode("utf-8") if isinstance(logs, bytes) else str(logs) - raise RuntimeError(f"Container died during startup. Logs:\n{logs_str}") + raise ServiceUnavailableError( + "OpenClaw container exited during startup", + code="OPENCLAW_CONTAINER_STARTUP_FAILED", + data={"container_id": instance.container_id, "detail": logs_str}, + ) except docker.errors.NotFound: - raise RuntimeError("Container was removed during startup.") + raise NotFoundError( + "OpenClaw container was removed during startup", + code="OPENCLAW_CONTAINER_NOT_FOUND", + data={"container_id": instance.container_id}, + ) except Exception as e: logger.warning(f"Failed to check container status: {e}") - raise RuntimeError(f"Gateway not ready within {GATEWAY_READY_TIMEOUT}s") + raise ServiceUnavailableError( + "OpenClaw gateway did not become ready in time", + code="OPENCLAW_GATEWAY_TIMEOUT", + data={"gateway_port": instance.gateway_port, "timeout_seconds": GATEWAY_READY_TIMEOUT}, + ) async def _health_check(self, instance: OpenClawInstance) -> bool: """Quick health check via HTTP OPTIONS to the gateway.""" diff --git a/backend/app/services/organization_service.py b/backend/app/services/organization_service.py index 9be6b1030..1b107499d 100644 --- a/backend/app/services/organization_service.py +++ b/backend/app/services/organization_service.py @@ -7,11 +7,11 @@ from pydantic import EmailStr -from app.common.exceptions import ( - BadRequestException, - ConflictException, - ForbiddenException, - NotFoundException, +from app.common.app_errors import ( + AccessDeniedError, + InvalidRequestError, + NotFoundError, + ResourceConflictError, ) from app.models.auth import AuthUser as User from app.models.enums import OrgRole @@ -47,7 +47,11 @@ async def create_organization( ) -> Dict: """Create an organization and set the current user as owner.""" if await self.org_repo.slug_exists(slug): - raise ConflictException("Slug already exists") + raise ResourceConflictError( + "Slug already exists", + code="ORGANIZATION_SLUG_ALREADY_EXISTS", + data={"slug": slug}, + ) organization = await self.org_repo.create( { @@ -84,7 +88,11 @@ async def set_active_organization( """Set the active organization (currently only validates membership and returns org info).""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not 
found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) member = await self._ensure_member(organization_id, current_user.id) # if persistence is needed, write to user settings/session here @@ -99,7 +107,11 @@ async def get_organization( """Get organization details.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) member = await self._ensure_member(organization_id, current_user.id) return self._serialize_org(organization, member.role, include_seats) @@ -116,7 +128,11 @@ async def update_organization( """Update organization basic info.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) member = await self._ensure_member(organization_id, current_user.id) self._ensure_admin_or_owner(member) @@ -128,7 +144,11 @@ async def update_organization( update_data["logo"] = logo if slug is not None: if await self.org_repo.slug_exists(slug, exclude_id=organization.id): - raise ConflictException("Slug already exists") + raise ResourceConflictError( + "Slug already exists", + code="ORGANIZATION_SLUG_ALREADY_EXISTS", + data={"slug": slug}, + ) update_data["slug"] = slug if update_data: @@ -147,18 +167,30 @@ async def update_seats( """Update seats info.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) member = await self._ensure_member(organization_id, current_user.id) self._ensure_admin_or_owner(member) self._validate_plan_for_seats(organization) if seats < 1 or seats > 50: - raise BadRequestException("Seats must be between 1 and 50") + raise InvalidRequestError( + "Seats must be between 1 and 50", + code="ORGANIZATION_SEATS_INVALID", + data={"seats": seats}, + ) current_members = await self.member_repo.count_by_org(organization.id) if seats < current_members: - raise BadRequestException("Seats cannot be less than current member count") + raise InvalidRequestError( + "Seats cannot be less than current member count", + code="ORGANIZATION_SEATS_BELOW_MEMBER_COUNT", + data={"seats": seats, "member_count": current_members}, + ) metadata = organization.metadata_ or {} seats_config = metadata.get("seats", {}) or {} @@ -181,7 +213,11 @@ async def list_members( """Get member list.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) await self._ensure_member(organization_id, current_user.id) members = await self.member_repo.list_by_org(organization.id) @@ -198,28 +234,47 @@ async def invite_member( """Invite/add a new member.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not 
found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) inviter = await self._ensure_member(organization_id, current_user.id) self._ensure_admin_or_owner(inviter) normalized_role = role or OrgRole.MEMBER if normalized_role not in self.SUPPORTED_ROLES: - raise BadRequestException("Invalid role") + raise InvalidRequestError( + "Invalid role", + code="ORGANIZATION_MEMBER_ROLE_INVALID", + data={"role": normalized_role}, + ) if normalized_role == OrgRole.OWNER: - raise BadRequestException("Cannot assign owner when inviting") + raise InvalidRequestError( + "Cannot assign owner when inviting", + code="ORGANIZATION_OWNER_INVITE_FORBIDDEN", + ) invitee = await self.user_repo.get_by_email(email) if not invitee: - raise NotFoundException("User not found") + raise NotFoundError("User not found", code="USER_NOT_FOUND", data={"email": str(email)}) existing = await self.member_repo.get_by_user_and_org(invitee.id, organization.id) if existing: - raise ConflictException("User is already a member") + raise ResourceConflictError( + "User is already a member", + code="ORGANIZATION_MEMBER_ALREADY_EXISTS", + data={"user_id": str(invitee.id)}, + ) seat_info = self._build_seats_info(organization) if seat_info["available"] <= 0: - raise BadRequestException("No available seats") + raise InvalidRequestError( + "No available seats", + code="ORGANIZATION_SEATS_UNAVAILABLE", + data=seat_info, + ) member = await self.member_repo.create( { @@ -242,15 +297,21 @@ async def get_member( """Get member details.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) requester = await self._ensure_member(organization_id, current_user.id) target = await self.member_repo.get_with_user(member_id) if not target or target.organization_id != organization.id: - raise NotFoundException("Member not found") + raise NotFoundError( + "Member not found", code="ORGANIZATION_MEMBER_NOT_FOUND", data={"member_id": str(member_id)} + ) if requester.user_id != target.user_id and requester.role not in [OrgRole.OWNER, OrgRole.ADMIN]: - raise ForbiddenException("Not allowed to view this member") + raise AccessDeniedError("Not allowed to view this member", code="ORGANIZATION_MEMBER_VIEW_FORBIDDEN") return self._serialize_member(target, include_usage) @@ -265,27 +326,43 @@ async def update_member_role( """Update member role.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) actor = await self._ensure_member(organization_id, current_user.id) target = await self.member_repo.get_with_user(member_id) if not target or target.organization_id != organization.id: - raise NotFoundException("Member not found") + raise NotFoundError( + "Member not found", code="ORGANIZATION_MEMBER_NOT_FOUND", data={"member_id": str(member_id)} + ) if role not in self.SUPPORTED_ROLES: - raise BadRequestException("Invalid role") + raise InvalidRequestError( + "Invalid role", + code="ORGANIZATION_MEMBER_ROLE_INVALID", + data={"role": role}, + ) if role == OrgRole.OWNER: - raise BadRequestException("Owner role cannot be reassigned") + raise InvalidRequestError( + "Owner role cannot be 
reassigned", code="ORGANIZATION_OWNER_ROLE_REASSIGN_FORBIDDEN" + ) if target.role == OrgRole.OWNER: - raise ForbiddenException("Cannot modify owner role") + raise AccessDeniedError("Cannot modify owner role", code="ORGANIZATION_OWNER_ROLE_MODIFY_FORBIDDEN") # access control: only owner can promote to admin; admins can also demote if role == OrgRole.ADMIN and actor.role != OrgRole.OWNER: - raise ForbiddenException("Only owner can promote to admin") + raise AccessDeniedError("Only owner can promote to admin", code="ORGANIZATION_ADMIN_PROMOTE_FORBIDDEN") if actor.role not in [OrgRole.OWNER, OrgRole.ADMIN]: - raise ForbiddenException("Insufficient permission to update roles") + raise AccessDeniedError( + "Insufficient permission to update roles", code="ORGANIZATION_MEMBER_ROLE_UPDATE_FORBIDDEN" + ) if actor.role == OrgRole.ADMIN and target.role in [OrgRole.ADMIN, OrgRole.OWNER]: - raise ForbiddenException("Admin cannot change other admins/owner") + raise AccessDeniedError( + "Admin cannot change other admins/owner", code="ORGANIZATION_ADMIN_ROLE_TARGET_FORBIDDEN" + ) target.role = role await self.commit() @@ -303,19 +380,27 @@ async def remove_member( """Remove a member.""" organization = await self.org_repo.get_with_members(organization_id) if not organization: - raise NotFoundException("Organization not found") + raise NotFoundError( + "Organization not found", + code="ORGANIZATION_NOT_FOUND", + data={"organization_id": str(organization_id)}, + ) actor = await self._ensure_member(organization_id, current_user.id) target = await self.member_repo.get_with_user(member_id) if not target or target.organization_id != organization.id: - raise NotFoundException("Member not found") + raise NotFoundError( + "Member not found", code="ORGANIZATION_MEMBER_NOT_FOUND", data={"member_id": str(member_id)} + ) if target.role == OrgRole.OWNER: - raise ForbiddenException("Cannot remove organization owner") + raise AccessDeniedError("Cannot remove organization owner", code="ORGANIZATION_OWNER_REMOVE_FORBIDDEN") if actor.role == OrgRole.ADMIN and target.role in [OrgRole.ADMIN, OrgRole.OWNER]: - raise ForbiddenException("Admin cannot remove admins or owner") + raise AccessDeniedError( + "Admin cannot remove admins or owner", code="ORGANIZATION_ADMIN_REMOVE_TARGET_FORBIDDEN" + ) if actor.role not in [OrgRole.OWNER, OrgRole.ADMIN] and actor.user_id != target.user_id: - raise ForbiddenException("Not allowed to remove this member") + raise AccessDeniedError("Not allowed to remove this member", code="ORGANIZATION_MEMBER_REMOVE_FORBIDDEN") members_before = await self.member_repo.count_by_org(organization.id) await self.member_repo.delete(target.id) @@ -341,17 +426,23 @@ async def _ensure_member(self, organization_id: uuid.UUID, user_id: str | uuid.U user_id_str = str(user_id) member = await self.member_repo.get_by_user_and_org(user_id_str, organization_id) if not member: - raise ForbiddenException("No access to this organization") + raise AccessDeniedError("No access to this organization", code="ORGANIZATION_ACCESS_DENIED") return member # type: ignore def _ensure_admin_or_owner(self, member: Member) -> None: if member.role not in [OrgRole.OWNER, OrgRole.ADMIN]: - raise ForbiddenException("Only owner or admin can perform this action") + raise AccessDeniedError( + "Only owner or admin can perform this action", code="ORGANIZATION_PERMISSION_DENIED" + ) def _validate_plan_for_seats(self, organization: Organization) -> None: plan = (organization.metadata_ or {}).get("plan_type", self.TEAM_PLAN) if plan != self.TEAM_PLAN: - 
raise BadRequestException("Seat management is available only for team plan") + raise InvalidRequestError( + "Seat management is available only for team plan", + code="ORGANIZATION_PLAN_UNSUPPORTED", + data={"plan": plan}, + ) def _serialize_org(self, organization: Organization, role: str, include_seats: bool) -> Dict: data = { @@ -412,8 +503,9 @@ def _serialize_member(self, member: Member, include_usage: bool) -> Dict: return data def _build_member_usage(self, member: Member) -> Dict: - # Stub: usage stats not yet implemented + # TODO: implement real usage stats per member return { - "messages": 0, - "storage_bytes": 0, + "messages": None, + "storage_bytes": None, + "_note": "Usage stats not yet implemented", } diff --git a/backend/app/services/platform_token_service.py b/backend/app/services/platform_token_service.py index 26ecfcb48..aabc37b2a 100644 --- a/backend/app/services/platform_token_service.py +++ b/backend/app/services/platform_token_service.py @@ -7,7 +7,7 @@ from datetime import datetime from typing import List, Optional, Tuple -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError from app.common.permissions import VALID_SCOPES from app.models.enums import OrgRole from app.models.platform_token import PlatformToken @@ -40,19 +40,32 @@ async def create_token( # Check limit active_count = await self.repo.count_active_by_user(user_id) if active_count >= MAX_ACTIVE_TOKENS_PER_USER: - raise BadRequestException(f"Maximum of {MAX_ACTIVE_TOKENS_PER_USER} active tokens reached") + raise InvalidRequestError( + f"Maximum of {MAX_ACTIVE_TOKENS_PER_USER} active tokens reached", + code="PLATFORM_TOKEN_LIMIT_EXCEEDED", + data={"limit": MAX_ACTIVE_TOKENS_PER_USER}, + ) # Validate scopes invalid = set(scopes) - set(VALID_SCOPES) if invalid: - raise BadRequestException(f"Invalid scopes: {invalid}") + raise InvalidRequestError( + f"Invalid scopes: {invalid}", + code="PLATFORM_TOKEN_SCOPES_INVALID", + data={"scopes": sorted(invalid)}, + ) # Validate resource_type/resource_id pair if (resource_type is None) != (resource_id is None): - raise BadRequestException("resource_type and resource_id must both be provided or both be null") + raise InvalidRequestError( + "resource_type and resource_id must both be provided or both be null", + code="PLATFORM_TOKEN_RESOURCE_BINDING_INVALID", + ) if resource_type is not None and resource_type not in self.VALID_RESOURCE_TYPES: - raise BadRequestException( - f"Invalid resource_type: {resource_type}. Must be one of {self.VALID_RESOURCE_TYPES}" + raise InvalidRequestError( + f"Invalid resource_type: {resource_type}. 
Must be one of {self.VALID_RESOURCE_TYPES}", + code="PLATFORM_TOKEN_RESOURCE_TYPE_INVALID", + data={"resource_type": resource_type}, ) # Validate resource exists and user has permission @@ -65,11 +78,19 @@ async def create_token( skill_repo = SkillRepository(self.db) skill = await skill_repo.get(resource_id) if not skill: - raise NotFoundException(f"Skill {resource_id} not found") + raise NotFoundError( + "Skill not found", + code="SKILL_NOT_FOUND", + data={"skill_id": str(resource_id)}, + ) try: await check_skill_access(self.db, skill, user_id, CollaboratorRole.editor) - except ForbiddenException: - raise ForbiddenException("No permission to create token for this skill") + except AccessDeniedError: + raise AccessDeniedError( + "No permission to create token for this skill", + code="PLATFORM_TOKEN_SKILL_ACCESS_DENIED", + data={"skill_id": str(resource_id)}, + ) elif resource_type == "graph": from app.repositories.workspace import WorkspaceMemberRepository, WorkspaceRepository @@ -77,20 +98,36 @@ async def create_token( member_repo = WorkspaceMemberRepository(self.db) workspace = await workspace_repo.get(resource_id) if not workspace: - raise NotFoundException(f"Workspace {resource_id} not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(resource_id)}, + ) if workspace.owner_id != user_id: member = await member_repo.get_member(resource_id, user_id) if not member or member.role not in {OrgRole.ADMIN, OrgRole.OWNER}: - raise ForbiddenException("No permission to create token for this workspace") + raise AccessDeniedError( + "No permission to create token for this workspace", + code="PLATFORM_TOKEN_WORKSPACE_ACCESS_DENIED", + data={"workspace_id": str(resource_id)}, + ) elif resource_type == "tool": from app.repositories.tool import ToolRepository tool_repo = ToolRepository(self.db) tool = await tool_repo.get(resource_id) if not tool: - raise NotFoundException(f"Tool {resource_id} not found") + raise NotFoundError( + "Tool not found", + code="TOOL_NOT_FOUND", + data={"tool_id": str(resource_id)}, + ) if tool.owner_id != user_id: - raise ForbiddenException("No permission to create token for this tool") + raise AccessDeniedError( + "No permission to create token for this tool", + code="PLATFORM_TOKEN_TOOL_ACCESS_DENIED", + data={"tool_id": str(resource_id)}, + ) # Generate token raw_secret = secrets.token_urlsafe(36) @@ -133,8 +170,12 @@ async def revoke_token( ) -> None: pt = await self.repo.get(token_id) if not pt: - raise NotFoundException("Token not found") + raise NotFoundError("Token not found", code="PLATFORM_TOKEN_NOT_FOUND", data={"token_id": str(token_id)}) if pt.user_id != user_id: - raise ForbiddenException("You can only revoke your own tokens") + raise AccessDeniedError( + "You can only revoke your own tokens", + code="PLATFORM_TOKEN_REVOKE_FORBIDDEN", + data={"token_id": str(token_id)}, + ) pt.is_active = False await self.db.commit() diff --git a/backend/app/services/run_reducers/__init__.py b/backend/app/services/run_reducers/__init__.py deleted file mode 100644 index 703fd951e..000000000 --- a/backend/app/services/run_reducers/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Run projection reducers.""" - -from app.services.agent_registry import AgentDefinition, agent_registry - -from .chat import apply_chat_event -from .chat import make_initial_projection as chat_make_initial_projection -from .copilot import apply_copilot_event -from .copilot import make_initial_projection as copilot_make_initial_projection -from 
.skill_creator import apply_skill_creator_event, make_initial_projection - -agent_registry.register( - AgentDefinition( - agent_name="skill_creator", - display_name="Skill Creator", - run_type="skill_creator", - reducer=apply_skill_creator_event, - make_initial_projection=make_initial_projection, - ) -) - -agent_registry.register( - AgentDefinition( - agent_name="chat", - display_name="Chat", - run_type="chat_turn", - reducer=apply_chat_event, - make_initial_projection=chat_make_initial_projection, - ) -) - -agent_registry.register( - AgentDefinition( - agent_name="copilot", - display_name="Copilot", - run_type="copilot_turn", - reducer=apply_copilot_event, - make_initial_projection=copilot_make_initial_projection, - ) -) - -__all__ = [ - "agent_registry", - "apply_chat_event", - "apply_copilot_event", - "apply_skill_creator_event", - "chat_make_initial_projection", - "copilot_make_initial_projection", - "make_initial_projection", -] diff --git a/backend/app/services/run_reducers/chat.py b/backend/app/services/run_reducers/chat.py deleted file mode 100644 index 9b4cae0c7..000000000 --- a/backend/app/services/run_reducers/chat.py +++ /dev/null @@ -1,171 +0,0 @@ -""" -Chat run projection reducer. - -Each chat turn is tracked as a single run projection containing one -user_message and one assistant_message (with optional tool_calls), -rather than a messages array as in skill_creator. -""" - -from __future__ import annotations - -from copy import deepcopy -from typing import Any - - -def _deepcopy_projection(projection: dict[str, Any] | None) -> dict[str, Any]: - if projection is not None: - return deepcopy(projection) - return { - "version": 1, - "run_type": "chat_turn", - "status": "queued", - "graph_id": None, - "thread_id": None, - "user_message": None, - "assistant_message": None, - "file_tree": {}, - "preview_data": None, - "node_execution_log": [], - "interrupt": None, - "meta": {}, - } - - -def make_initial_projection(payload: dict[str, Any], status: str) -> dict[str, Any]: - projection = _deepcopy_projection(None) - projection["status"] = status - projection["graph_id"] = payload.get("graph_id") - projection["thread_id"] = payload.get("thread_id") - return projection - - -def apply_chat_event( - projection: dict[str, Any] | None, - *, - event_type: str, - payload: dict[str, Any], - status: str, -) -> dict[str, Any]: - next_projection = _deepcopy_projection(projection) - next_projection["status"] = status - - if event_type == "run_initialized": - return make_initial_projection( - { - "graph_id": payload.get("graph_id"), - "thread_id": payload.get("thread_id"), - }, - status, - ) - - if event_type == "user_message_added": - message = payload.get("message") - if isinstance(message, dict): - next_projection["user_message"] = message - return next_projection - - if event_type == "assistant_message_started": - message = payload.get("message") - if isinstance(message, dict): - next_projection["assistant_message"] = message - return next_projection - - if event_type == "content_delta": - message_id = payload.get("message_id") - delta = payload.get("delta") or "" - if not message_id or not delta: - return next_projection - msg = next_projection["assistant_message"] - if isinstance(msg, dict) and msg.get("id") == message_id: - msg["content"] = f"{msg.get('content', '')}{delta}" - return next_projection - - if event_type == "tool_start": - message_id = payload.get("message_id") - tool = payload.get("tool") - if not message_id or not isinstance(tool, dict): - return next_projection - msg = 
next_projection["assistant_message"] - if isinstance(msg, dict) and msg.get("id") == message_id: - tools = msg.setdefault("tool_calls", []) - tools.append(tool) - return next_projection - - if event_type == "tool_end": - tool_id = payload.get("tool_id") - tool_output = payload.get("tool_output") - tool_name = payload.get("tool_name") - end_time = payload.get("end_time") - if isinstance(next_projection["assistant_message"], dict): - msg = next_projection["assistant_message"] - for tool in msg.get("tool_calls", []): - if tool_id and tool.get("id") != tool_id: - continue - if not tool_id and tool.get("status") != "running": - continue - tool["status"] = "completed" - tool["result"] = tool_output - if end_time is not None: - tool["endTime"] = end_time - break - if tool_name == "preview_skill" and tool_output is not None: - next_projection["preview_data"] = tool_output - return next_projection - - if event_type == "file_event": - path = payload.get("path") - action = payload.get("action") - if not path or not action: - return next_projection - if action == "delete": - next_projection["file_tree"].pop(path, None) - else: - next_projection["file_tree"][path] = { - "action": action, - "size": payload.get("size"), - "timestamp": payload.get("timestamp"), - } - return next_projection - - if event_type == "node_start": - node_id = payload.get("node_id") - node_name = payload.get("node_name") - start_time = payload.get("start_time") - next_projection["node_execution_log"].append( - { - "node_id": node_id, - "node_name": node_name, - "status": "running", - "start_time": start_time, - "end_time": None, - } - ) - return next_projection - - if event_type == "node_end": - node_name = payload.get("node_name") - end_time = payload.get("end_time") - for entry in reversed(next_projection["node_execution_log"]): - if entry.get("node_name") == node_name and entry.get("status") == "running": - entry["status"] = "completed" - entry["end_time"] = end_time - break - return next_projection - - if event_type == "interrupt": - next_projection["interrupt"] = payload.get("interrupt") - return next_projection - - if event_type == "error": - next_projection["meta"]["error"] = payload.get("message") - return next_projection - - if event_type == "done": - next_projection["meta"]["completed"] = True - return next_projection - - if event_type == "status": - next_projection["meta"]["status_message"] = payload.get("message") - return next_projection - - return next_projection diff --git a/backend/app/services/run_reducers/copilot.py b/backend/app/services/run_reducers/copilot.py deleted file mode 100644 index 4ca5bcc54..000000000 --- a/backend/app/services/run_reducers/copilot.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Copilot run projection reducer. - -Each copilot turn is tracked as a single run projection containing -streaming content, thought steps, tool calls/results, and final -result message + graph actions. 
-""" - -from __future__ import annotations - -from copy import deepcopy -from typing import Any - -_INITIAL: dict[str, Any] = { - "version": 1, - "run_type": "copilot_turn", - "status": "queued", - "stage": None, - "content": "", - "thought_steps": [], - "tool_calls": [], - "tool_results": [], - "result_message": None, - "result_actions": [], - "error": None, - "graph_id": None, - "mode": None, -} - - -def _deepcopy_projection(projection: dict[str, Any] | None) -> dict[str, Any]: - if projection is not None: - return deepcopy(projection) - return deepcopy(_INITIAL) - - -def make_initial_projection(payload: dict[str, Any], status: str) -> dict[str, Any]: - projection = _deepcopy_projection(None) - projection["status"] = status - projection["graph_id"] = payload.get("graph_id") - projection["mode"] = payload.get("mode") - return projection - - -def apply_copilot_event( - projection: dict[str, Any] | None, - *, - event_type: str, - payload: dict[str, Any], - status: str, -) -> dict[str, Any]: - next_p = _deepcopy_projection(projection) - next_p["status"] = status - - if event_type == "run_initialized": - return make_initial_projection( - {"graph_id": payload.get("graph_id"), "mode": payload.get("mode")}, - status, - ) - - if event_type == "status": - next_p["stage"] = payload.get("stage") - return next_p - - if event_type == "content_delta": - next_p["content"] += payload.get("delta", "") - return next_p - - if event_type == "thought_step": - step = payload.get("step") - if step: - next_p["thought_steps"].append(step) - return next_p - - if event_type == "tool_call": - next_p["tool_calls"].append( - { - "tool": payload.get("tool", ""), - "input": payload.get("input", {}), - } - ) - return next_p - - if event_type == "tool_result": - action = payload.get("action") - if action: - next_p["tool_results"].append(action) - return next_p - - if event_type == "result": - next_p["result_message"] = payload.get("message", "") - next_p["result_actions"] = payload.get("actions", []) - return next_p - - if event_type == "error": - next_p["status"] = "failed" - next_p["error"] = payload.get("message") - return next_p - - if event_type == "done": - if next_p["status"] != "failed": - next_p["status"] = "completed" - return next_p - - return next_p diff --git a/backend/app/services/run_reducers/skill_creator.py b/backend/app/services/run_reducers/skill_creator.py deleted file mode 100644 index 8b11b9cfd..000000000 --- a/backend/app/services/run_reducers/skill_creator.py +++ /dev/null @@ -1,152 +0,0 @@ -""" -Skill Creator run projection reducer. 
-""" - -from __future__ import annotations - -from copy import deepcopy -from typing import Any - - -def _deepcopy_projection(projection: dict[str, Any] | None) -> dict[str, Any]: - if projection is not None: - return deepcopy(projection) - return { - "version": 1, - "run_type": "skill_creator", - "status": "queued", - "graph_id": None, - "thread_id": None, - "edit_skill_id": None, - "messages": [], - "current_assistant_message_id": None, - "preview_data": None, - "file_tree": {}, - "interrupt": None, - "meta": {}, - } - - -def make_initial_projection(payload: dict[str, Any], status: str) -> dict[str, Any]: - projection = _deepcopy_projection(None) - projection["status"] = status - projection["graph_id"] = payload.get("graph_id") - projection["thread_id"] = payload.get("thread_id") - projection["edit_skill_id"] = payload.get("edit_skill_id") - return projection - - -def apply_skill_creator_event( - projection: dict[str, Any] | None, - *, - event_type: str, - payload: dict[str, Any], - status: str, -) -> dict[str, Any]: - next_projection = _deepcopy_projection(projection) - next_projection["status"] = status - - if event_type == "run_initialized": - return make_initial_projection( - { - "graph_id": payload.get("graph_id"), - "thread_id": payload.get("thread_id"), - "edit_skill_id": payload.get("edit_skill_id"), - }, - status, - ) - - if event_type == "user_message_added": - message = payload.get("message") - if isinstance(message, dict): - next_projection["messages"].append(message) - return next_projection - - if event_type == "assistant_message_started": - message = payload.get("message") - if isinstance(message, dict): - next_projection["messages"].append(message) - next_projection["current_assistant_message_id"] = message.get("id") - return next_projection - - if event_type == "content_delta": - message_id = payload.get("message_id") - delta = payload.get("delta") or "" - if not message_id or not delta: - return next_projection - for message in next_projection["messages"]: - if message.get("id") == message_id: - message["content"] = f"{message.get('content', '')}{delta}" - break - return next_projection - - if event_type == "tool_start": - message_id = payload.get("message_id") - tool = payload.get("tool") - if not message_id or not isinstance(tool, dict): - return next_projection - for message in next_projection["messages"]: - if message.get("id") == message_id: - tools = message.setdefault("tool_calls", []) - tools.append(tool) - break - return next_projection - - if event_type == "tool_end": - message_id = payload.get("message_id") - tool_id = payload.get("tool_id") - tool_output = payload.get("tool_output") - tool_name = payload.get("tool_name") - end_time = payload.get("end_time") - for message in next_projection["messages"]: - if message.get("id") != message_id: - continue - for tool in message.get("tool_calls", []): - if tool_id and tool.get("id") != tool_id: - continue - if not tool_id and tool.get("status") != "running": - continue - tool["status"] = "completed" - tool["result"] = tool_output - if end_time is not None: - tool["endTime"] = end_time - break - break - if tool_name == "preview_skill" and tool_output is not None: - next_projection["preview_data"] = tool_output - return next_projection - - if event_type == "file_event": - path = payload.get("path") - action = payload.get("action") - if not path or not action: - return next_projection - if action == "delete": - next_projection["file_tree"].pop(path, None) - else: - next_projection["file_tree"][path] = { - "action": 
action, - "size": payload.get("size"), - "timestamp": payload.get("timestamp"), - } - return next_projection - - if event_type == "interrupt": - next_projection["interrupt"] = payload.get("interrupt") - next_projection["current_assistant_message_id"] = None - return next_projection - - if event_type == "error": - next_projection["meta"]["error"] = payload.get("message") - return next_projection - - if event_type == "done": - next_projection["meta"]["completed"] = True - next_projection["current_assistant_message_id"] = None - return next_projection - - if event_type == "status": - next_projection["meta"]["status_message"] = payload.get("message") - return next_projection - - return next_projection diff --git a/backend/app/services/run_service.py b/backend/app/services/run_service.py deleted file mode 100644 index f32b6532a..000000000 --- a/backend/app/services/run_service.py +++ /dev/null @@ -1,638 +0,0 @@ -""" -Service layer for durable agent runs. -""" - -from __future__ import annotations - -import asyncio -import uuid -from datetime import datetime -from typing import Any, Awaitable, Optional - -from loguru import logger -from sqlalchemy.ext.asyncio import AsyncSession - -from app.core.redis import RedisClient -from app.core.settings import settings -from app.models.agent_run import AgentRun, AgentRunEvent, AgentRunSnapshot, AgentRunStatus -from app.models.enums import ModelUsageSource -from app.repositories.agent_run import AgentRunRepository -from app.services.agent_registry import AgentDefinition -from app.services.run_reducers import agent_registry -from app.utils.datetime import utc_now -from app.websocket.run_subscription_manager import run_subscription_manager - -# Throttle Redis snapshot writes for content_delta events (500ms per run). -_SNAPSHOT_THROTTLE_SECONDS = 0.5 -_snapshot_last_published: dict[str, float] = {} - - -def _build_snapshot_dict(run_id: str, snapshot: AgentRunSnapshot) -> dict[str, Any]: - return { - "run_id": run_id, - "status": snapshot.status, - "last_seq": snapshot.last_seq, - "projection": snapshot.projection, - } - - -class RunService: - """Orchestrates durable agent run lifecycle, event sourcing, and snapshot management.""" - - def __init__(self, db: AsyncSession): - self.db = db - self.repo = AgentRunRepository(db) - - async def list_agents(self) -> list[AgentDefinition]: - """Return all registered agent definitions. - - Returns: - List of available agent definitions. - """ - return agent_registry.list_definitions() - - def get_agent_definition(self, agent_name: str) -> AgentDefinition: - """Look up an agent definition by name. - - Args: - agent_name: Registered name of the agent. - - Returns: - The matching agent definition. - - Raises: - ValueError: If no agent with the given name is registered. - """ - try: - return agent_registry.get(agent_name) - except KeyError: - raise ValueError(f"Unknown agent: {agent_name}") - - def get_agent_display_name(self, agent_name: str | None) -> str | None: - """Return the human-readable display name for an agent. - - Args: - agent_name: Registered agent name, or None. - - Returns: - The display name if the agent is found, otherwise the raw agent_name. 
- """ - definition = agent_registry.find(agent_name) - return definition.display_name if definition else agent_name - - async def create_run( - self, - *, - user_id: str, - agent_name: str, - graph_id: uuid.UUID, - thread_id: Optional[str], - message: str, - input: Optional[dict[str, Any]] = None, - workspace_id: Optional[uuid.UUID] = None, - source: str = "run_center", - run_type: Optional[str] = None, - ) -> AgentRun: - """Create a new agent run with its initial snapshot and user message event. - - Args: - user_id: ID of the user initiating the run. - agent_name: Registered agent name to execute. - graph_id: Graph to run the agent against. - thread_id: Conversation thread ID; auto-generated if None. - message: Initial user message for the run. - input: Optional extra input parameters forwarded to the agent. - workspace_id: Optional workspace scope for the run. - source: Origin label (e.g. "run_center", "skills_creator_page"). - run_type: Override the agent's default run type if provided. - - Returns: - The newly created AgentRun record. - - Raises: - ValueError: If agent_name is not registered. - """ - definition = self.get_agent_definition(agent_name) - resolved_thread_id = thread_id or str(uuid.uuid4()) - run_input = dict(input or {}) - - from app.core.trace_context import get_trace_id - - trace_id = get_trace_id() - run = AgentRun( - user_id=user_id, - workspace_id=workspace_id, - graph_id=graph_id, - thread_id=resolved_thread_id, - run_type=definition.run_type if run_type is None else run_type, - agent_name=agent_name, - source=source, - status=AgentRunStatus.QUEUED, - title=message[:100] if message else definition.display_name, - request_payload={ - "agent_name": agent_name, - "message": message, - "graph_id": str(graph_id), - "thread_id": resolved_thread_id, - "input": run_input, - # Spread run_input at top level so consumers that read keys - # directly (e.g. edit_skill_id) continue to work. - **run_input, - }, - last_heartbeat_at=utc_now(), - trace_id=uuid.UUID(trace_id) if trace_id else None, - ) - self.db.add(run) - await self.db.flush() - - snapshot = AgentRunSnapshot( - run_id=run.id, - last_seq=0, - status=run.status.value, - projection=definition.make_initial_projection( - { - "graph_id": str(graph_id), - "thread_id": resolved_thread_id, - **run_input, - }, - run.status.value, - ), - ) - self.db.add(snapshot) - await self.db.flush() - - await self.append_event( - run_id=run.id, - event_type="user_message_added", - payload={ - "message": { - "id": f"msg-user-{uuid.uuid4()}", - "role": "user", - "content": message, - "timestamp": int(utc_now().timestamp() * 1000), - } - }, - commit=False, - ) - await self.db.commit() - await self.db.refresh(run) - await RedisClient.set_run_snapshot( - str(run.id), - _build_snapshot_dict(str(run.id), snapshot), - ) - return run - - async def create_skill_creator_run( - self, - *, - user_id: str, - graph_id: uuid.UUID, - thread_id: Optional[str], - message: str, - edit_skill_id: Optional[str], - workspace_id: Optional[uuid.UUID] = None, - ) -> AgentRun: - """Create a run specifically for the skill_creator agent. - - Args: - user_id: ID of the user initiating the run. - graph_id: Graph to run the agent against. - thread_id: Conversation thread ID; auto-generated if None. - message: Initial user message for skill creation/editing. - edit_skill_id: Existing skill ID to edit, or None for new skill creation. - workspace_id: Optional workspace scope for the run. - - Returns: - The newly created AgentRun record. 
- """ - return await self.create_run( - user_id=user_id, - agent_name="skill_creator", - graph_id=graph_id, - thread_id=thread_id, - message=message, - input={"edit_skill_id": edit_skill_id}, - workspace_id=workspace_id, - source=ModelUsageSource.SKILLS_CREATOR, - ) - - async def get_run(self, run_id: uuid.UUID, user_id: str) -> Optional[AgentRun]: - """Fetch a single run by ID, scoped to the given user. - - Args: - run_id: Unique identifier of the run. - user_id: Owner user ID used for access control. - - Returns: - The AgentRun if found and owned by the user, otherwise None. - """ - return await self.repo.get_by_id_and_user(run_id, user_id) - - async def get_snapshot(self, run_id: uuid.UUID, user_id: str) -> Optional[AgentRunSnapshot]: - """Fetch the latest projection snapshot for a run. - - Args: - run_id: Unique identifier of the run. - user_id: Owner user ID used for access control. - - Returns: - The snapshot if the run exists and belongs to the user, otherwise None. - """ - run = await self.get_run(run_id, user_id) - if not run: - return None - return await self.repo.get_snapshot(run_id) - - async def list_events_after( - self, run_id: uuid.UUID, user_id: str, after_seq: int = 0, limit: int = 500 - ) -> list[AgentRunEvent]: - """List run events with sequence numbers greater than after_seq. - - Args: - run_id: Unique identifier of the run. - user_id: Owner user ID used for access control. - after_seq: Return only events with seq > this value. - limit: Maximum number of events to return. - - Returns: - List of events ordered by sequence number, or empty list if the - run is not found. - """ - run = await self.get_run(run_id, user_id) - if not run: - return [] - return list(await self.repo.list_events_after(run_id, after_seq=after_seq, limit=limit)) - - async def find_latest_active_skill_creator_run( - self, *, user_id: str, graph_id: uuid.UUID, thread_id: Optional[str] = None - ) -> Optional[AgentRun]: - """Find the most recent active skill_creator run for a user and graph. - - Args: - user_id: Owner user ID. - graph_id: Graph the run belongs to. - thread_id: Optional thread ID to narrow the search. - - Returns: - The latest active skill_creator run, or None. - """ - return await self.find_latest_active_run( - user_id=user_id, - agent_name="skill_creator", - graph_id=graph_id, - thread_id=thread_id, - ) - - async def find_latest_active_run( - self, - *, - user_id: str, - agent_name: str, - graph_id: Optional[uuid.UUID] = None, - thread_id: Optional[str] = None, - ) -> Optional[AgentRun]: - """Find the most recent active run for a given agent, user, and graph. - - Args: - user_id: Owner user ID. - agent_name: Registered agent name. - graph_id: Graph the run belongs to. - thread_id: Optional thread ID to narrow the search. - - Returns: - The latest active AgentRun, or None if no active run exists. - """ - return await self.repo.find_latest_active_run( - user_id=user_id, - agent_name=agent_name, - graph_id=graph_id, - thread_id=thread_id, - ) - - async def list_recent_runs( - self, - *, - user_id: str, - run_type: Optional[str] = None, - agent_name: Optional[str] = None, - status: Optional[str] = None, - search: Optional[str] = None, - limit: int = 50, - ) -> list[AgentRun]: - """List recent runs for a user with optional filters. - - Args: - user_id: Owner user ID. - run_type: Filter by run type (e.g. "skill_creator"). - agent_name: Filter by agent name. - status: Filter by run status string. - search: Free-text search against run titles. - limit: Maximum number of runs to return. 
- - Returns: - List of matching AgentRun records, most recent first. - """ - return list( - await self.repo.list_recent_runs_for_user( - user_id=user_id, - run_type=run_type, - agent_name=agent_name, - status=status, - search=search, - limit=limit, - ) - ) - - async def mark_status( - self, - *, - run_id: uuid.UUID, - user_id: Optional[str], - status: AgentRunStatus, - runtime_owner_id: Optional[str] = None, - error_code: Optional[str] = None, - error_message: Optional[str] = None, - result_summary: Optional[dict[str, Any]] = None, - ) -> Optional[AgentRun]: - """Transition a run to a new status, update its snapshot, and broadcast the change. - - Handles runtime ownership assignment on RUNNING, clears it on terminal - states, and publishes status updates via Redis and WebSocket. - - Args: - run_id: Unique identifier of the run. - user_id: Owner user ID, or None for system-level transitions. - status: Target status to transition to. - runtime_owner_id: Runtime instance claiming this run (for RUNNING). - error_code: Machine-readable error code (for FAILED). - error_message: Human-readable error description (for FAILED). - result_summary: Optional summary dict merged into the run record. - - Returns: - The updated AgentRun, or None if the run was not found. - """ - run = await self.repo.get_run_for_update(run_id, user_id=user_id) - if not run: - return None - - heartbeat_at = utc_now() - run.status = status - run.error_code = error_code - run.error_message = error_message - run.last_heartbeat_at = heartbeat_at - if status == AgentRunStatus.RUNNING: - run.runtime_owner_id = runtime_owner_id or run.runtime_owner_id or settings.run_runtime_instance_id - elif status in { - AgentRunStatus.INTERRUPT_WAIT, - AgentRunStatus.COMPLETED, - AgentRunStatus.FAILED, - AgentRunStatus.CANCELLED, - }: - run.runtime_owner_id = None - if result_summary is not None: - run.result_summary = result_summary - if status in {AgentRunStatus.COMPLETED, AgentRunStatus.FAILED, AgentRunStatus.CANCELLED}: - run.finished_at = heartbeat_at - _snapshot_last_published.pop(str(run_id), None) - - snapshot = await self.repo.get_snapshot(run_id) - if snapshot: - snapshot.status = status.value - projection = dict(snapshot.projection or {}) - projection["status"] = status.value - if error_message: - meta = dict(projection.get("meta") or {}) - meta["error"] = error_message - projection["meta"] = meta - snapshot.projection = projection - - await self.db.commit() - try: - coros: list[Awaitable[Any]] = [] - if snapshot: - coros.append( - RedisClient.set_run_snapshot( - str(run.id), - _build_snapshot_dict(str(run.id), snapshot), - ) - ) - coros.append( - run_subscription_manager.broadcast_event( - str(run.id), - { - "type": "run_status", - "run_id": str(run.id), - "status": status.value, - "error_code": error_code, - "error_message": error_message, - }, - ) - ) - await asyncio.gather(*coros) - except Exception as exc: - logger.warning(f"Failed to publish run status to Redis/WS | run_id={run_id} | error={exc}") - return run - - async def touch_run_heartbeat( - self, - *, - run_id: uuid.UUID, - runtime_owner_id: Optional[str] = None, - ) -> Optional[AgentRun]: - """Update the heartbeat timestamp for an active run to prevent stale-run recovery. - - Only updates runs in QUEUED or RUNNING status. Also sets the - runtime_owner_id if not already assigned. - - Args: - run_id: Unique identifier of the run. - runtime_owner_id: Runtime instance ID to record as the owner. - - Returns: - The updated AgentRun, or None if the run was not found. 
- """ - run = await self.repo.get_run_for_update(run_id) - if not run: - return None - if run.status not in {AgentRunStatus.QUEUED, AgentRunStatus.RUNNING}: - return run - - run.runtime_owner_id = runtime_owner_id or run.runtime_owner_id or settings.run_runtime_instance_id - run.last_heartbeat_at = utc_now() - await self.db.commit() - return run - - async def recover_stale_incomplete_runs( - self, - *, - runtime_owner_id: str, - stale_before: datetime, - ) -> list[AgentRun]: - """Mark stale incomplete runs as FAILED and record recovery metadata. - - Finds runs whose last heartbeat is older than stale_before and - transitions them to FAILED with a ``runtime_recovered`` error code. - - Args: - runtime_owner_id: ID of the runtime performing the recovery. - stale_before: Cutoff timestamp; runs with an older heartbeat are - considered stale. - - Returns: - List of AgentRun records that were recovered (marked as FAILED). - """ - stale_runs = await self.repo.list_recoverable_stale_runs( - stale_before=stale_before, - ) - recovered: list[AgentRun] = [] - recovered_at = utc_now().isoformat() - for run in stale_runs: - result_summary = dict(run.result_summary or {}) - result_summary.update( - { - "recovered_by_runtime": runtime_owner_id, - "recovered_at": recovered_at, - "previous_runtime_owner_id": run.runtime_owner_id, - } - ) - error_message = ( - "Recovered stale run after runtime heartbeat timeout" - if run.runtime_owner_id - else "Recovered stale run without active runtime owner heartbeat" - ) - updated_run = await self.mark_status( - run_id=run.id, - user_id=run.user_id, - status=AgentRunStatus.FAILED, - error_code="runtime_recovered", - error_message=error_message, - result_summary=result_summary, - ) - if updated_run is not None: - recovered.append(updated_run) - return recovered - - async def append_event( - self, - *, - run_id: uuid.UUID, - event_type: str, - payload: dict[str, Any], - trace_id: Optional[uuid.UUID] = None, - observation_id: Optional[uuid.UUID] = None, - parent_observation_id: Optional[uuid.UUID] = None, - commit: bool = True, - ) -> AgentRunEvent: - """Append a new event to a run's event log and update its snapshot projection. - - Increments the run's sequence counter, applies the agent's reducer to - update the snapshot projection, and publishes the event via Redis and - WebSocket. Snapshot writes to Redis are throttled for content_delta events. - - Args: - run_id: Unique identifier of the run. - event_type: Type label for the event (e.g. "user_message_added", - "content_delta"). - payload: Arbitrary event data. - trace_id: Optional tracing ID for observability. - observation_id: Optional observation ID for observability. - parent_observation_id: Optional parent observation ID for nesting. - commit: Whether to commit the transaction and publish to Redis/WS. - - Returns: - The newly created AgentRunEvent. - - Raises: - ValueError: If the run is not found. 
- """ - run = await self.repo.get_run_for_update(run_id) - if not run: - raise ValueError(f"Run not found: {run_id}") - - next_seq = int(run.last_seq) + 1 - event = AgentRunEvent( - run_id=run.id, - seq=next_seq, - event_type=event_type, - payload=payload, - trace_id=trace_id, - observation_id=observation_id, - parent_observation_id=parent_observation_id, - ) - self.db.add(event) - run.last_seq = next_seq - run.last_heartbeat_at = utc_now() - - snapshot = await self.repo.get_snapshot(run.id) - if snapshot is None: - snapshot = AgentRunSnapshot( - run_id=run.id, - last_seq=0, - status=run.status.value, - projection={}, - ) - self.db.add(snapshot) - - definition = agent_registry.find(run.agent_name) - if definition is not None: - snapshot.projection = definition.reducer( - snapshot.projection, - event_type=event_type, - payload=payload, - status=run.status.value, - ) - snapshot.last_seq = next_seq - snapshot.status = run.status.value - - await self.db.flush() - if commit: - await self.db.commit() - try: - # Throttle Redis snapshot writes for content_delta — - # always publish the event, but skip the heavier snapshot - # cache refresh if we published one within the last 500ms. - run_id_str = str(run.id) - now = utc_now().timestamp() - should_publish_snapshot = True - if event_type == "content_delta": - last = _snapshot_last_published.get(run_id_str, 0.0) - if (now - last) < _SNAPSHOT_THROTTLE_SECONDS: - should_publish_snapshot = False - - if should_publish_snapshot: - await RedisClient.set_run_snapshot( - run_id_str, - _build_snapshot_dict(run_id_str, snapshot), - ) - _snapshot_last_published[run_id_str] = now - - await asyncio.gather( - RedisClient.publish_run_event( - run_id_str, - { - "run_id": run_id_str, - "seq": event.seq, - "event_type": event.event_type, - "data": event.payload, - }, - ), - run_subscription_manager.broadcast_event( - run_id_str, - { - "type": "event", - "run_id": run_id_str, - "seq": event.seq, - "event_type": event.event_type, - "data": event.payload, - "trace_id": str(event.trace_id) if event.trace_id else None, - "observation_id": str(event.observation_id) if event.observation_id else None, - "parent_observation_id": ( - str(event.parent_observation_id) if event.parent_observation_id else None - ), - "created_at": event.created_at.isoformat() if event.created_at else None, - }, - ), - ) - except Exception as exc: - logger.warning(f"Failed to publish run event to Redis/WS | run_id={run_id} | error={exc}") - return event diff --git a/backend/app/services/runner_factory.py b/backend/app/services/runner_factory.py new file mode 100644 index 000000000..e1e361eaf --- /dev/null +++ b/backend/app/services/runner_factory.py @@ -0,0 +1,13 @@ +"""Factory for creating ExecutionRunner with port adapters.""" + +from __future__ import annotations + +from sqlalchemy.ext.asyncio import AsyncSession + +from app.core.agent.cli_backends.execution_runner import ExecutionRunner +from app.services.execution_event_adapter import ExecutionEventAdapter +from app.services.execution_reader_adapter import ExecutionReaderAdapter + + +def create_execution_runner(db: AsyncSession) -> ExecutionRunner: + return ExecutionRunner(ExecutionEventAdapter(db), ExecutionReaderAdapter(db)) diff --git a/backend/app/services/sandbox_manager.py b/backend/app/services/sandbox_manager.py index aed8b0915..bb7fa254e 100644 --- a/backend/app/services/sandbox_manager.py +++ b/backend/app/services/sandbox_manager.py @@ -7,12 +7,11 @@ from datetime import datetime, timezone from typing import Any, Dict, Optional, cast 
-from fastapi import status from loguru import logger from sqlalchemy import CursorResult, delete, select, update from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import AppException +from app.common.app_errors import ServiceUnavailableError from app.core.agent.backends.constants import ( DEFAULT_USER_SANDBOX_AUTO_REMOVE, DEFAULT_USER_SANDBOX_CPU_LIMIT, @@ -240,10 +239,7 @@ async def _ensure_sandbox_running_locked(self, user_id: str) -> PydanticSandboxA except Exception as e: logger.error(f"Failed to start sandbox for user {user_id}: {e}") await self._update_status(sandbox_record.id, InstanceStatus.FAILED, error_message=str(e)) - raise AppException( - status_code=status.HTTP_503_SERVICE_UNAVAILABLE, - message=_classify_sandbox_error(e), - ) + raise ServiceUnavailableError(message=_classify_sandbox_error(e)) @staticmethod def _force_remove_container(container_id: str) -> None: diff --git a/backend/app/services/session_service.py b/backend/app/services/session_service.py deleted file mode 100644 index 3a6094cb5..000000000 --- a/backend/app/services/session_service.py +++ /dev/null @@ -1,201 +0,0 @@ -"""Session management service.""" - -import uuid -from pathlib import Path -from typing import Any, Dict, List, Optional - -from loguru import logger -from sqlalchemy import func, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.core.ai_adapter import AgentBridge -from app.core.settings import settings -from app.models import Conversation, Message -from app.schemas.common import SessionCreate, SessionResponse -from app.utils.datetime import utc_now - - -class SessionService: - """Service for managing user sessions.""" - - def __init__(self, db: AsyncSession): - self.db = db - - async def _get_conversation( - self, session_id: str, user_id=None, active_only: bool = True - ) -> Optional[Conversation]: - where_clauses = [Conversation.thread_id == session_id] - if user_id is not None: - where_clauses.append(Conversation.user_id == user_id) - if active_only: - where_clauses.append(Conversation.is_active == 1) - result = await self.db.execute(select(Conversation).where(*where_clauses)) - return result.scalar_one_or_none() - - async def create_session(self, session_data: SessionCreate, user_id) -> SessionResponse: - """Create a new session.""" - session_id = str(uuid.uuid4()) - - # Create workspace directory - workspace_root = Path(settings.WORKSPACE_ROOT) - workspace_path = session_data.workspace_path or str(workspace_root / session_id) - workspace = Path(workspace_path) - workspace.mkdir(parents=True, exist_ok=True) - - # Create database record - conversation = Conversation( - thread_id=session_id, - user_id=user_id, - title=session_data.title or "New Session", - meta_data={"workspace_path": str(workspace)}, - is_active=1, - ) - - self.db.add(conversation) - await self.db.commit() - await self.db.refresh(conversation) - - return await self._to_response(conversation) - - async def get_session(self, session_id: str) -> Optional[SessionResponse]: - """Get session by ID.""" - conversation = await self._get_conversation(session_id, user_id=None, active_only=True) - if not conversation: - return None - - return await self._to_response(conversation) - - async def get_session_for_user(self, session_id: str, user_id) -> Optional[SessionResponse]: - """Get a session by ID, ensuring it belongs to the given user.""" - conversation = await self._get_conversation(session_id, user_id=user_id, active_only=True) - if not conversation: - return None - return await 
self._to_response(conversation) - - async def get_user_sessions(self, user_id) -> List[SessionResponse]: - """Get all sessions for a user.""" - result = await self.db.execute( - select(Conversation) - .where(Conversation.user_id == user_id, Conversation.is_active == 1) - .order_by(Conversation.updated_at.desc()) - ) - conversations = result.scalars().all() - responses: List[SessionResponse] = [] - for conv in conversations: - responses.append(await self._to_response(conv)) - return responses - - async def update_session_title(self, session_id: str, title: str, user_id=None) -> Optional[SessionResponse]: - """Update session title.""" - conversation = await self._get_conversation(session_id, user_id=user_id, active_only=False) - - if not conversation: - return None - - conversation.title = title - conversation.updated_at = utc_now() - await self.db.commit() - await self.db.refresh(conversation) - - return await self._to_response(conversation) - - async def delete_session(self, session_id: str, user_id=None) -> bool: - """Delete a session.""" - conversation = await self._get_conversation(session_id, user_id=user_id, active_only=False) - - if not conversation: - return False - - # Mark as inactive (soft delete) - conversation.is_active = 0 - conversation.updated_at = utc_now() - await self.db.commit() - - # Optionally clean up files - try: - workspace_path = (conversation.meta_data or {}).get("workspace_path") - if workspace_path: - workspace = Path(workspace_path) - else: - workspace = Path(settings.WORKSPACE_ROOT) / session_id - if workspace.exists() and workspace.is_dir(): - import shutil - - shutil.rmtree(workspace) - except Exception: - logger.debug("Workspace directory cleanup failed for session %s", session_id, exc_info=True) - - return True - - async def add_message( - self, - session_id: str, - content: str, - role: str, - metadata: Optional[Dict[str, Any]] = None, - ) -> Message: - """Add a message to the session.""" - message = Message( - thread_id=session_id, - content=content, - role=role, - meta_data=metadata or {}, - ) - - self.db.add(message) - - # Update session timestamp - result = await self.db.execute(select(Conversation).where(Conversation.thread_id == session_id)) - conversation = result.scalar_one_or_none() - if conversation: - conversation.updated_at = utc_now() - - await self.db.commit() - await self.db.refresh(message) - - return message - - async def get_session_messages(self, session_id: str, limit: int = 100, user_id=None) -> List[Message]: - """Get messages for a session.""" - if user_id is not None: - conv = await self._get_conversation(session_id, user_id=user_id, active_only=False) - if not conv: - return [] - result = await self.db.execute( - select(Message).where(Message.thread_id == session_id).order_by(Message.created_at.desc()).limit(limit) - ) - return list(result.scalars().all()) - - async def get_ai_adapter(self, session_id: str, user_id=None) -> Optional[AgentBridge]: - """Get AI adapter for a session (lightweight, no CLI coupling).""" - conversation = await self._get_conversation(session_id, user_id=user_id, active_only=True) - if not conversation: - return None - workspace_path = (conversation.meta_data or {}).get("workspace_path") - if not workspace_path: - workspace_path = str(Path(settings.WORKSPACE_ROOT) / session_id) - # Construct adapter using the decoupled AgentBridge (engine may be injected elsewhere) - return AgentBridge(session_id, workspace_path) - - async def _to_response(self, conversation: Conversation) -> SessionResponse: - """Convert 
database model to response schema.""" - count_result = await self.db.execute( - select(func.count(Message.id)).where(Message.thread_id == conversation.thread_id) - ) - message_count = count_result.scalar() or 0 - workspace_path = (conversation.meta_data or {}).get("workspace_path") or str( - Path(settings.WORKSPACE_ROOT) / conversation.thread_id - ) - - return SessionResponse( - success=True, - code=200, - msg="Success", - session_id=conversation.thread_id, - title=conversation.title, - workspace_path=workspace_path, - is_active=conversation.is_active == 1, - created_at=conversation.created_at, - updated_at=conversation.updated_at, - message_count=message_count, - ) diff --git a/backend/app/services/skill_collaborator_service.py b/backend/app/services/skill_collaborator_service.py index 2d8a69ec0..1b87ec5b7 100644 --- a/backend/app/services/skill_collaborator_service.py +++ b/backend/app/services/skill_collaborator_service.py @@ -5,7 +5,7 @@ import uuid from typing import List -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError from app.common.skill_permissions import check_skill_access from app.models.skill import Skill from app.models.skill_collaborator import CollaboratorRole, SkillCollaborator @@ -56,11 +56,17 @@ async def add_collaborator( ) if target_user_id == skill.owner_id: - raise BadRequestException("Cannot add the owner as a collaborator") + raise InvalidRequestError( + "Cannot add the owner as a collaborator", code="SKILL_OWNER_COLLABORATOR_FORBIDDEN" + ) existing = await self.repo.get_by_skill_and_user(skill_id, target_user_id) if existing: - raise BadRequestException("User is already a collaborator") + raise InvalidRequestError( + "User is already a collaborator", + code="SKILL_COLLABORATOR_ALREADY_EXISTS", + data={"user_id": target_user_id}, + ) collab = SkillCollaborator( skill_id=skill_id, @@ -92,7 +98,11 @@ async def update_collaborator_role( collab = await self.repo.get_by_skill_and_user(skill_id, target_user_id) if not collab: - raise NotFoundException("Collaborator not found") + raise NotFoundError( + "Collaborator not found", + code="SKILL_COLLABORATOR_NOT_FOUND", + data={"user_id": target_user_id}, + ) collab.role = new_role await self.db.commit() @@ -117,7 +127,11 @@ async def remove_collaborator( deleted = await self.repo.delete_by_skill_and_user(skill_id, target_user_id) if not deleted: - raise NotFoundException("Collaborator not found") + raise NotFoundError( + "Collaborator not found", + code="SKILL_COLLABORATOR_NOT_FOUND", + data={"user_id": target_user_id}, + ) await self.db.commit() async def transfer_ownership( @@ -130,12 +144,16 @@ async def transfer_ownership( skill = await self._get_skill_or_404(skill_id) if skill.owner_id != current_user_id: - raise ForbiddenException("Only the owner can transfer ownership") + raise AccessDeniedError("Only the owner can transfer ownership", code="SKILL_OWNER_TRANSFER_FORBIDDEN") # Check new owner doesn't have a skill with the same name existing = await self.skill_repo.get_by_name_and_owner(skill.name, new_owner_id) if existing: - raise BadRequestException(f"New owner already has a skill named '{skill.name}'") + raise InvalidRequestError( + f"New owner already has a skill named '{skill.name}'", + code="SKILL_NAME_ALREADY_EXISTS", + data={"name": skill.name, "owner_id": new_owner_id}, + ) # Remove new owner from collaborators if present await self.repo.delete_by_skill_and_user(skill_id, 
new_owner_id) @@ -161,5 +179,5 @@ async def transfer_ownership( async def _get_skill_or_404(self, skill_id: uuid.UUID) -> Skill: skill = await self.skill_repo.get(skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) return skill # type: ignore[return-value,no-any-return] diff --git a/backend/app/services/skill_service.py b/backend/app/services/skill_service.py index 68d44b5c6..658cc4913 100644 --- a/backend/app/services/skill_service.py +++ b/backend/app/services/skill_service.py @@ -10,7 +10,7 @@ from loguru import logger -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError from app.common.skill_permissions import check_skill_access from app.core.skill.validators import ( truncate_compatibility, @@ -40,6 +40,15 @@ def __init__(self, db): self.repo = SkillRepository(db) self.file_repo = SkillFileRepository(db) + def _invalid_import_files_error(self, invalid_files: List[str]) -> InvalidRequestError: + invalid_list = "\n".join(f" - {file_name}" for file_name in invalid_files) + return InvalidRequestError( + f"The following files cannot be imported (binary files or system files):\n{invalid_list}\n\n" + f"Skill import only supports text files (.py, .md, .json, .yaml, etc.)", + code="SKILL_IMPORT_FILES_INVALID", + data={"files": invalid_files}, + ) + async def list_skills( self, current_user_id: Optional[str] = None, @@ -62,7 +71,7 @@ async def get_skill( """Get Skill details""" skill = await self.repo.get_with_files(skill_id) if not skill or not isinstance(skill, Skill): - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) # Permission check: collaborator-aware if current_user_id: @@ -73,7 +82,7 @@ async def get_skill( CollaboratorRole.viewer, ) elif not skill.is_public: - raise ForbiddenException("You don't have permission to access this skill") + raise AccessDeniedError("You don't have permission to access this skill", code="SKILL_ACCESS_DENIED") # Type assertion: get_with_files returns Optional[Skill], we've already checked it's not None skill = await self._attach_latest_version(skill) @@ -183,7 +192,11 @@ async def create_skill( is_valid, error = validate_skill_name(name) if not is_valid: logger.warning(f"Invalid skill name rejected: {name!r} — {error}") - raise BadRequestException(f"Invalid skill name: {error}") + raise InvalidRequestError( + f"Invalid skill name: {error}", + code="SKILL_NAME_INVALID", + data={"validation_error": error, "name": name}, + ) # Validate and truncate description per Agent Skills specification is_valid, error = validate_skill_description(description) @@ -203,7 +216,11 @@ async def create_skill( # Check if Skill with same name exists (same owner) existing = await self.repo.get_by_name_and_owner(name, owner_id) if existing: - raise BadRequestException(f"Skill name '{name}' already exists for this owner") + raise InvalidRequestError( + f"Skill name '{name}' already exists for this owner", + code="SKILL_NAME_ALREADY_EXISTS", + data={"name": name}, + ) skill = Skill( name=name, @@ -268,11 +285,7 @@ async def create_skill( # If there are invalid files, raise an error if invalid_files: - invalid_list = "\n".join(f" - {f}" for f in invalid_files) - raise BadRequestException( - f"The following files cannot be imported (binary files or 
system files):\n{invalid_list}\n\n" - f"Skill import only supports text files (.py, .md, .json, .yaml, etc.)" - ) + raise self._invalid_import_files_error(invalid_files) await self.db.commit() await self.db.refresh(skill) @@ -305,7 +318,7 @@ async def update_skill( """ skill = await self.repo.get(skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) # Permission check: collaborator-aware (editor role) await check_skill_access( @@ -358,10 +371,18 @@ async def update_skill( is_valid, error = validate_skill_name(name) if not is_valid: logger.warning(f"Invalid skill name rejected: {name!r} — {error}") - raise BadRequestException(f"Invalid skill name: {error}") + raise InvalidRequestError( + f"Invalid skill name: {error}", + code="SKILL_NAME_INVALID", + data={"validation_error": error, "name": name}, + ) existing = await self.repo.get_by_name_and_owner(name, skill.owner_id) if existing: - raise BadRequestException(f"Skill name '{name}' already exists for this owner") + raise InvalidRequestError( + f"Skill name '{name}' already exists for this owner", + code="SKILL_NAME_ALREADY_EXISTS", + data={"name": name}, + ) skill.name = name # Validate and update description if provided @@ -451,11 +472,7 @@ async def update_skill( # If there are invalid files, raise an error if invalid_files: - invalid_list = "\n".join(f" - {f}" for f in invalid_files) - raise BadRequestException( - f"The following files cannot be imported (binary files or system files):\n{invalid_list}\n\n" - f"Skill import only supports text files (.py, .md, .json, .yaml, etc.)" - ) + raise self._invalid_import_files_error(invalid_files) await self.db.commit() await self.db.refresh(skill) @@ -470,11 +487,11 @@ async def delete_skill( """Delete Skill""" skill = await self.repo.get(skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) # Permission check: Only owner can delete if skill.owner_id != current_user_id: - raise ForbiddenException("Only the owner can delete a skill") + raise AccessDeniedError("Only the owner can delete a skill", code="SKILL_DELETE_FORBIDDEN") # Delete associated files await self.file_repo.delete_by_skill(skill_id) @@ -504,7 +521,7 @@ async def add_file( """Add file to Skill""" skill = await self.repo.get(skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) # Permission check: collaborator-aware (editor role) await check_skill_access( @@ -516,14 +533,20 @@ async def add_file( # Check if it's a system file if is_system_file(path) or is_system_file(file_name): - raise BadRequestException(f"File '{path}' is a system file and cannot be imported") + raise InvalidRequestError( + f"File '{path}' is a system file and cannot be imported", + code="SKILL_SYSTEM_FILE_IMPORT_FORBIDDEN", + data={"path": path}, + ) # Validate content if provided if content is not None: is_valid, error_msg = is_valid_text_content(content) if not is_valid: - raise BadRequestException( - f"File '{path}' {error_msg}. Skill import only supports text files (.py, .md, .json, .yaml, etc.)" + raise InvalidRequestError( + f"File '{path}' {error_msg}. 
Skill import only supports text files (.py, .md, .json, .yaml, etc.)", + code="SKILL_FILE_CONTENT_INVALID", + data={"path": path}, ) # Log warning for uncommon file extensions (but don't reject) @@ -560,11 +583,11 @@ async def delete_file( """Delete file""" file_obj = await self.file_repo.get(file_id) if not file_obj: - raise NotFoundException("Skill file not found") + raise NotFoundError("Skill file not found", code="SKILL_FILE_NOT_FOUND", data={"file_id": str(file_id)}) skill = await self.repo.get(file_obj.skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(file_obj.skill_id)}) # Permission check: collaborator-aware (editor role) await check_skill_access( @@ -588,11 +611,11 @@ async def update_file( """Update file content""" file_obj = await self.file_repo.get(file_id) if not file_obj: - raise NotFoundException("Skill file not found") + raise NotFoundError("Skill file not found", code="SKILL_FILE_NOT_FOUND", data={"file_id": str(file_id)}) skill = await self.repo.get(file_obj.skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(file_obj.skill_id)}) # Permission check: collaborator-aware (editor role) await check_skill_access( @@ -605,7 +628,11 @@ async def update_file( # Check if it's a system file (if path is being updated) if path is not None: if is_system_file(path) or is_system_file(file_obj.file_name): - raise BadRequestException(f"File '{path}' is a system file and cannot be imported") + raise InvalidRequestError( + f"File '{path}' is a system file and cannot be imported", + code="SKILL_SYSTEM_FILE_IMPORT_FORBIDDEN", + data={"path": path}, + ) # Log warning for uncommon file extensions (but don't reject) is_common, warning = validate_file_extension(path) @@ -616,8 +643,10 @@ async def update_file( # Validate content is_valid, error_msg = is_valid_text_content(content) if not is_valid: - raise BadRequestException( - f"File '{file_obj.path}' {error_msg}. Skill import only supports text files (.py, .md, .json, .yaml, etc.)" + raise InvalidRequestError( + f"File '{file_obj.path}' {error_msg}. Skill import only supports text files (.py, .md, .json, .yaml, etc.)", + code="SKILL_FILE_CONTENT_INVALID", + data={"path": file_obj.path}, ) file_obj.content = content diff --git a/backend/app/services/skill_version_service.py b/backend/app/services/skill_version_service.py index 6aaf27a59..e0a92e9b2 100644 --- a/backend/app/services/skill_version_service.py +++ b/backend/app/services/skill_version_service.py @@ -8,7 +8,7 @@ import semver -from app.common.exceptions import BadRequestException, NotFoundException +from app.common.app_errors import InvalidRequestError, NotFoundError from app.common.skill_permissions import check_skill_access from app.models.skill import Skill, SkillFile from app.models.skill_collaborator import CollaboratorRole @@ -51,17 +51,27 @@ async def publish_version( try: new_ver = semver.Version.parse(version_str) except ValueError: - raise BadRequestException(f"Invalid version format: '{version_str}'. Must be MAJOR.MINOR.PATCH") + raise InvalidRequestError( + f"Invalid version format: '{version_str}'. 
Must be MAJOR.MINOR.PATCH", + code="SKILL_VERSION_FORMAT_INVALID", + data={"version": version_str}, + ) # Reject pre-release / build metadata if new_ver.prerelease or new_ver.build: - raise BadRequestException("Pre-release and build metadata are not supported") + raise InvalidRequestError( + "Pre-release and build metadata are not supported", code="SKILL_VERSION_PRERELEASE_UNSUPPORTED" + ) # Check > highest existing highest_str = await self.repo.get_highest_version_str(skill_id) if highest_str: highest = semver.Version.parse(highest_str) if new_ver <= highest: - raise BadRequestException(f"Version {version_str} must be greater than current highest {highest_str}") + raise InvalidRequestError( + f"Version {version_str} must be greater than current highest {highest_str}", + code="SKILL_VERSION_NOT_GREATER_THAN_LATEST", + data={"version": version_str, "latest_version": highest_str}, + ) # Snapshot sv = SkillVersion( @@ -141,7 +151,11 @@ async def get_version( ) sv = await self.repo.get_by_version(skill_id, version_str) if not sv: - raise NotFoundException(f"Version {version_str} not found") + raise NotFoundError( + "Skill version not found", + code="SKILL_VERSION_NOT_FOUND", + data={"skill_id": str(skill_id), "version": version_str}, + ) return sv # type: ignore[return-value,no-any-return] async def get_latest_version( @@ -163,7 +177,9 @@ async def get_latest_version( ) sv = await self.repo.get_latest(skill_id) if not sv: - raise NotFoundException("No published versions found") + raise NotFoundError( + "No published versions found", code="SKILL_VERSION_NOT_FOUND", data={"skill_id": str(skill_id)} + ) return sv # type: ignore[return-value,no-any-return] async def delete_version( @@ -186,7 +202,11 @@ async def delete_version( ) sv = await self.repo.get_by_version(skill_id, version_str) if not sv: - raise NotFoundException(f"Version {version_str} not found") + raise NotFoundError( + "Skill version not found", + code="SKILL_VERSION_NOT_FOUND", + data={"skill_id": str(skill_id), "version": version_str}, + ) await self.db.delete(sv) await self.db.commit() @@ -210,7 +230,11 @@ async def restore_draft( ) sv = await self.repo.get_by_version(skill_id, version_str) if not sv: - raise NotFoundException(f"Version {version_str} not found") + raise NotFoundError( + "Skill version not found", + code="SKILL_VERSION_NOT_FOUND", + data={"skill_id": str(skill_id), "version": version_str}, + ) # Overwrite draft skill.name = sv.skill_name @@ -245,11 +269,11 @@ async def restore_draft( async def _get_skill_or_404(self, skill_id: uuid.UUID) -> Skill: skill = await self.skill_repo.get(skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) return skill # type: ignore[return-value,no-any-return] async def _get_skill_with_files_or_404(self, skill_id: uuid.UUID) -> Skill: skill = await self.skill_repo.get_with_files(skill_id) if not skill: - raise NotFoundException("Skill not found") + raise NotFoundError("Skill not found", code="SKILL_NOT_FOUND", data={"skill_id": str(skill_id)}) return skill # type: ignore[return-value,no-any-return] diff --git a/backend/app/services/task_activity_service.py b/backend/app/services/task_activity_service.py new file mode 100644 index 000000000..b1ad64cc1 --- /dev/null +++ b/backend/app/services/task_activity_service.py @@ -0,0 +1,272 @@ +""" +Task activity service — create/read/update/delete. 
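+
+Activities form a task's comment and progress timeline. Member comments on
+a dispatchable task can trigger agent dispatch and @-mention fan-out (see
+create_activity), and replies are flattened to a single level under their
+top-level parent activity.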
+""" + +from __future__ import annotations + +import uuid +from datetime import datetime +from typing import Optional + +from loguru import logger +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import NotFoundError +from app.models.task import Task, TaskStatus +from app.models.task_activity import ActivityAuthorType, ActivityType, TaskActivity +from app.repositories.task import TaskRepository +from app.repositories.task_activity import TaskActivityRepository + + +class TaskActivityService: + """Manages task activities.""" + + def __init__(self, db: AsyncSession): + self.db = db + self.repo = TaskActivityRepository(db) + self.task_repo = TaskRepository(db) + + async def create_activity( + self, + *, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + author_type: ActivityAuthorType, + author_id: str, + content: str, + activity_type: ActivityType = ActivityType.COMMENT, + parent_activity_id: Optional[uuid.UUID] = None, + ) -> tuple[TaskActivity, Task, bool, list[uuid.UUID]]: + """Returns (activity, task, should_dispatch_agent, mentioned_agent_ids).""" + task = await self.task_repo.get_by_id_and_workspace(task_id, workspace_id) + if not task: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + + if parent_activity_id is not None: + parent = await self.repo.get(parent_activity_id) + if parent and parent.parent_activity_id is not None: + parent_activity_id = parent.parent_activity_id + + activity = TaskActivity( + task_id=task_id, + workspace_id=workspace_id, + author_type=author_type, + author_id=author_id, + content=content, + type=activity_type, + parent_activity_id=parent_activity_id, + ) + self.db.add(activity) + await self.db.commit() + await self.db.refresh(activity) + + should_dispatch = False + mentioned_agent_ids: list[uuid.UUID] = [] + + if author_type == ActivityAuthorType.MEMBER and activity_type == ActivityType.COMMENT: + should_dispatch = self._should_enqueue_on_activity(task) + + from app.utils.mentions import agent_mentions + + mentions = agent_mentions(content) + seen: set[uuid.UUID] = set() + for m in mentions: + if m.id != task.agent_id and m.id not in seen: + seen.add(m.id) + mentioned_agent_ids.append(m.id) + + return activity, task, should_dispatch, mentioned_agent_ids + + async def list_activities( + self, + *, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + cursor: Optional[str] = None, + limit: int = 50, + ) -> tuple[list[TaskActivity], bool, Optional[str]]: + """Return (activities, has_more, next_cursor).""" + task = await self.task_repo.get_by_id_and_workspace(task_id, workspace_id) + if not task: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + + cursor_dt = None + if cursor: + cursor_dt = datetime.fromisoformat(cursor) + + activities = list(await self.repo.list_by_task(task_id, cursor=cursor_dt, limit=limit + 1, order_asc=True)) + + has_more = len(activities) > limit + if has_more: + activities = activities[:limit] + + next_cursor = activities[-1].created_at.isoformat() if has_more and activities else None + return activities, has_more, next_cursor + + async def _get_owned_activity( + self, + activity_id: uuid.UUID, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + author_id: str, + ) -> Optional[TaskActivity]: + task = await self.task_repo.get_by_id_and_workspace(task_id, workspace_id) + if not task: + return None + activity = await self.repo.get_by_id_and_task(activity_id, task_id) + if not activity: + return None + if activity.author_id 
!= author_id: + raise PermissionError("Only the author can modify this activity") + return activity + + async def update_activity( + self, + *, + activity_id: uuid.UUID, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + author_id: str, + content: str, + ) -> Optional[TaskActivity]: + activity = await self._get_owned_activity(activity_id, task_id, workspace_id, author_id) + if not activity: + return None + activity.content = content + await self.db.commit() + await self.db.refresh(activity) + return activity + + async def delete_activity( + self, + *, + activity_id: uuid.UUID, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + author_id: str, + ) -> bool: + activity = await self._get_owned_activity(activity_id, task_id, workspace_id, author_id) + if not activity: + return False + await self.db.delete(activity) + await self.db.commit() + return True + + @staticmethod + def _should_enqueue_on_activity(task: Task) -> bool: + if task.agent_id is None: + return False + if task.status in {TaskStatus.DONE, TaskStatus.CANCELLED, TaskStatus.BACKLOG}: + return False + if task.latest_run_id is not None and task.status == TaskStatus.IN_PROGRESS: + return False + return True + + async def post_run_activity( + self, + *, + run, + result_status: str, + result_output: str = "", + error_message: str = "", + ) -> Optional[TaskActivity]: + """Auto-post agent activity after run completion.""" + if not run.task_id: + return None + + agent_id = str(run.created_by) if run.created_by else None + if not agent_id: + return None + + if result_status == "succeeded": + if run.started_at: + already = await self.repo.has_agent_posted_since(run.task_id, agent_id, run.started_at) + if already: + return None + content = result_output.strip() if result_output else "Run completed." + activity_type = ActivityType.PROGRESS_UPDATE + elif result_status == "failed": + content = error_message.strip() if error_message else "Run failed." + activity_type = ActivityType.SYSTEM + else: + return None + + # Need workspace_id — fetch task + from sqlalchemy import select + + from app.models.task import Task + + result = await self.db.execute(select(Task).where(Task.id == run.task_id)) + task = result.scalar_one_or_none() + if not task: + return None + + activity = TaskActivity( + task_id=run.task_id, + workspace_id=task.workspace_id, + author_type=ActivityAuthorType.AGENT, + author_id=agent_id, + content=content[:5000], + type=activity_type, + parent_activity_id=None, + ) + self.db.add(activity) + await self.db.commit() + await self.db.refresh(activity) + logger.info(f"Auto-posted {activity_type.value} activity {activity.id} for run {run.id}") + return activity + + async def post_execution_activity( + self, + *, + execution, + task_id: uuid.UUID, + result_status: str, + result_output: str = "", + error_message: str = "", + ) -> Optional[TaskActivity]: + """Post an activity after execution completion.""" + agent_id = str(execution.agent_id) if hasattr(execution, "agent_id") and execution.agent_id else None + if not agent_id: + # Try to get from created_by + agent_id = str(execution.created_by) if hasattr(execution, "created_by") and execution.created_by else None + if not agent_id: + return None + + if result_status == "succeeded": + if hasattr(execution, "started_at") and execution.started_at: + already = await self.repo.has_agent_posted_since(task_id, agent_id, execution.started_at) + if already: + return None + content = result_output.strip() if result_output else "Execution completed." 
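+            # The has_agent_posted_since guard above keeps this to at most one
+            # auto-posted progress update per agent since the execution started;
+            # content is truncated to 5000 chars where the TaskActivity row is
+            # built below.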
+ activity_type = ActivityType.PROGRESS_UPDATE + elif result_status == "failed": + content = error_message.strip() if error_message else "Execution failed." + activity_type = ActivityType.SYSTEM + else: + return None + + # Need workspace_id — fetch task + from sqlalchemy import select + + from app.models.task import Task + + result = await self.db.execute(select(Task).where(Task.id == task_id)) + task = result.scalar_one_or_none() + if not task: + return None + + activity = TaskActivity( + task_id=task_id, + workspace_id=task.workspace_id, + author_type=ActivityAuthorType.AGENT, + author_id=agent_id, + content=content[:5000], + type=activity_type, + parent_activity_id=None, + ) + self.db.add(activity) + await self.db.commit() + await self.db.refresh(activity) + logger.info(f"Auto-posted {activity_type.value} activity {activity.id} for execution {execution.id}") + return activity diff --git a/backend/app/services/task_service.py b/backend/app/services/task_service.py new file mode 100644 index 000000000..1863c1401 --- /dev/null +++ b/backend/app/services/task_service.py @@ -0,0 +1,174 @@ +""" +Task service layer — pure CRUD + status machine. + +Execution dispatch logic lives in ExecutionLifecycleService. +""" + +from __future__ import annotations + +import uuid +from typing import Any, Optional + +from loguru import logger +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import InvalidRequestError, NotFoundError +from app.core.state_machines.engine import InvalidTransition +from app.core.state_machines.transitions import transition_task +from app.models.task import Task, TaskPriority, TaskStatus +from app.repositories.task import TaskRepository + + +class TaskService: + """Manages task CRUD and status transitions.""" + + def __init__(self, db: AsyncSession): + self.db = db + self.repo = TaskRepository(db) + + async def create_task( + self, + *, + workspace_id: uuid.UUID, + creator_id: str, + title: str, + description: Optional[str] = None, + goal: Optional[str] = None, + priority: TaskPriority = TaskPriority.NONE, + agent_id: Optional[uuid.UUID] = None, + parent_task_id: Optional[uuid.UUID] = None, + tags: Optional[list] = None, + position: float = 0.0, + auto_approve: bool = False, + ) -> Task: + task = Task( + workspace_id=workspace_id, + creator_id=creator_id, + title=title, + description=description, + goal=goal, + priority=priority, + status=TaskStatus.BACKLOG, + agent_id=agent_id, + parent_task_id=parent_task_id, + tags=tags, + position=position, + auto_approve=auto_approve, + ) + self.db.add(task) + await self.db.commit() + await self.db.refresh(task) + logger.info(f"Created task: {task.id} ({title})") + return task + + async def get_task(self, task_id: uuid.UUID, workspace_id: uuid.UUID) -> Optional[Task]: + return await self.repo.get_by_id_and_workspace(task_id, workspace_id) + + async def list_tasks( + self, + *, + workspace_id: uuid.UUID, + status: Optional[str] = None, + creator_id: Optional[str] = None, + agent_id: Optional[uuid.UUID] = None, + parent_task_id: Optional[uuid.UUID] = None, + limit: int = 50, + ) -> list[Task]: + return list( + await self.repo.list_by_workspace( + workspace_id=workspace_id, + status=status, + creator_id=creator_id, + agent_id=agent_id, + parent_task_id=parent_task_id, + limit=limit, + ) + ) + + @classmethod + def get_transitions(cls) -> dict[str, list[str]]: + from app.core.state_machines.definitions import TASK_STATES + + return {status: sorted(targets) for status, targets in TASK_STATES.items()} + + async def 
update_task( + self, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + **kwargs: Any, + ) -> Optional[Task]: + task = await self.repo.get_by_id_and_workspace(task_id, workspace_id) + if not task: + return None + + new_status = kwargs.get("status") + if new_status is not None: + try: + new_status = TaskStatus(new_status) + except ValueError: + raise InvalidRequestError( + f"Invalid status: {new_status}", + code="TASK_STATUS_INVALID", + data={"status": str(new_status)}, + ) + + if new_status != task.status: + try: + await transition_task(task, new_status, self.db) + except InvalidTransition: + from_status = task.status.value if hasattr(task.status, "value") else str(task.status) + to_status = new_status.value if hasattr(new_status, "value") else str(new_status) + raise InvalidRequestError( + f"Cannot transition from '{task.status}' to '{new_status}'", + code="TASK_STATUS_TRANSITION_INVALID", + data={"from_status": from_status, "to_status": to_status}, + ) + + allowed = { + "title", + "description", + "goal", + "priority", + "agent_id", + "parent_task_id", + "due_date", + "position", + "tags", + "auto_approve", + } + for key, value in kwargs.items(): + if key in allowed: + setattr(task, key, value) + await self.db.commit() + await self.db.refresh(task) + return task + + async def cancel_task(self, task: Task) -> None: + """Cancel a task that has no active run.""" + await transition_task(task, "cancelled", self.db) + await self.db.commit() + + async def assign_to_agent( + self, + *, + task_id: uuid.UUID, + workspace_id: uuid.UUID, + agent_id: uuid.UUID, + ) -> Task: + """Assign a task to an agent.""" + from app.core.state_machines.transitions import transition_task + + task = await self.repo.get_for_update(task_id, workspace_id) + if not task: + raise NotFoundError("Task not found", code="TASK_NOT_FOUND", data={"task_id": str(task_id)}) + + task.agent_id = agent_id + # Only auto-transition to IN_PROGRESS from dispatchable states. + # Tasks in DONE or CANCELLED must be moved back to BACKLOG explicitly + # before they can be reassigned and re-dispatched. + if task.status in (TaskStatus.BACKLOG, TaskStatus.TODO): + await transition_task(task, "in_progress", self.db) + await self.db.commit() + await self.db.refresh(task) + logger.info(f"Assigned task {task_id} to agent {agent_id}") + return task diff --git a/backend/app/services/thread_service.py b/backend/app/services/thread_service.py new file mode 100644 index 000000000..8c2cb66f7 --- /dev/null +++ b/backend/app/services/thread_service.py @@ -0,0 +1,145 @@ +""" +ThreadService — manages Thread lifecycle. 
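One note on the TaskService code above before the thread service: update_task routes any status change through the state machine first and only then applies the whitelisted fields, so callers get a structured error rather than a silently ignored transition. A hedged usage sketch (the exc.code and exc.data attributes are assumed from the constructor arguments shown in this diff):

import uuid

from app.common.app_errors import InvalidRequestError
from app.services.task_service import TaskService


async def complete_task(db, task_id: uuid.UUID, workspace_id: uuid.UUID):
    service = TaskService(db)
    try:
        # "status" goes through transition_task; other keys must be in the allowed set
        return await service.update_task(task_id, workspace_id, status="done")
    except InvalidRequestError as exc:
        # e.g. TASK_STATUS_TRANSITION_INVALID with from_status/to_status in exc.data
        print(exc.code, exc.data)
        return None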
+""" + +from __future__ import annotations + +import uuid +from typing import List + +from loguru import logger +from sqlalchemy.ext.asyncio import AsyncSession + +from app.common.app_errors import NotFoundError +from app.models.thread import Thread +from app.repositories.thread import ThreadRepository +from app.schemas.thread import CreateThreadRequest, UpdateThreadRequest + + +class ThreadService: + """Manages Thread entities.""" + + def __init__(self, db: AsyncSession): + self.db = db + self.thread_repo = ThreadRepository(db) + + # ---- Thread CRUD ---- + + async def list_threads(self, agent_id: uuid.UUID) -> List[Thread]: + return await self.thread_repo.list_by_agent(agent_id) + + async def get_thread(self, thread_id: uuid.UUID) -> Thread: + thread = await self.thread_repo.get(thread_id) + if not thread: + raise NotFoundError("Thread not found", code="THREAD_NOT_FOUND", data={"thread_id": str(thread_id)}) + return thread + + async def create_thread( + self, + workspace_id: uuid.UUID, + user_id: str, + data: CreateThreadRequest, + ) -> Thread: + thread = await self.thread_repo.create( + { + "agent_id": data.agent_id, + "workspace_id": workspace_id, + "title": data.title, + "status": "active", + "created_by": user_id, + } + ) + await self.db.commit() + await self.db.refresh(thread) + logger.info(f"Created thread {thread.id} for agent {data.agent_id}") + return thread + + async def update_thread( + self, + thread_id: uuid.UUID, + data: UpdateThreadRequest, + ) -> Thread: + thread = await self.thread_repo.get(thread_id) + if not thread: + raise NotFoundError("Thread not found", code="THREAD_NOT_FOUND", data={"thread_id": str(thread_id)}) + + update_data = data.model_dump(exclude_unset=True) + if not update_data: + return thread + + updated = await self.thread_repo.update(thread_id, update_data) + assert updated is not None + await self.db.commit() + await self.db.refresh(updated) + return updated + + async def archive_thread(self, thread_id: uuid.UUID) -> Thread: + thread = await self.thread_repo.get(thread_id) + if not thread: + raise NotFoundError("Thread not found", code="THREAD_NOT_FOUND", data={"thread_id": str(thread_id)}) + + updated = await self.thread_repo.update(thread_id, {"status": "archived"}) + assert updated is not None + await self.db.commit() + await self.db.refresh(updated) + logger.info(f"Archived thread {thread_id}") + return updated + + # ---- Thread Events (aggregation) ---- + + async def list_thread_events( + self, + thread_id: uuid.UUID, + after_id: uuid.UUID | None = None, + limit: int = 200, + ) -> tuple[list[dict], int]: + """Aggregate execution events across all runs in a thread.""" + from sqlalchemy import and_, func, not_, select + + from app.models.agent_run import AgentRun + from app.models.execution import Execution, ExecutionEvent + + thread = await self.thread_repo.get(thread_id) + if not thread: + raise NotFoundError("Thread not found", code="THREAD_NOT_FOUND", data={"thread_id": str(thread_id)}) + + base_filter = and_( + AgentRun.thread_id == thread_id, + not_(ExecutionEvent.event_type.like("copilot_%")), + ) + + count_q = ( + select(func.count(ExecutionEvent.id)) + .join(Execution, ExecutionEvent.execution_id == Execution.id) + .join(AgentRun, Execution.run_id == AgentRun.id) + .where(base_filter) + ) + total = (await self.db.execute(count_q)).scalar() or 0 + + query = ( + select( + ExecutionEvent.id, + ExecutionEvent.execution_id, + ExecutionEvent.sequence_no, + ExecutionEvent.event_type, + ExecutionEvent.payload, + ExecutionEvent.created_at, + 
Execution.status.label("execution_status"), + AgentRun.id.label("run_id"), + ) + .join(Execution, ExecutionEvent.execution_id == Execution.id) + .join(AgentRun, Execution.run_id == AgentRun.id) + .where(base_filter) + .order_by(AgentRun.created_at, Execution.attempt_index, ExecutionEvent.sequence_no) + ) + + if after_id: + ref_event = ( + await self.db.execute(select(ExecutionEvent.created_at).where(ExecutionEvent.id == after_id)) + ).scalar() + if ref_event: + query = query.where(ExecutionEvent.created_at > ref_event) + + query = query.limit(limit) + rows = (await self.db.execute(query)).mappings().all() + return [dict(r) for r in rows], total diff --git a/backend/app/services/tool_service.py b/backend/app/services/tool_service.py index 1986282e5..a6f5ac23e 100644 --- a/backend/app/services/tool_service.py +++ b/backend/app/services/tool_service.py @@ -20,7 +20,7 @@ from loguru import logger from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import BadRequestException +from app.common.app_errors import InvalidRequestError, ServiceUnavailableError from app.core.tools.tool import EnhancedTool, ToolFilter, ToolSourceType from app.core.tools.tool_registry import ToolRegistry, get_global_registry from app.models.enums import McpConnectionStatus @@ -232,7 +232,11 @@ async def refresh_server_tools( server = await self._server_service.get(server_id, user_id) if not server.enabled: - raise BadRequestException("Cannot refresh tools for disabled server") + raise InvalidRequestError( + "Cannot refresh tools for disabled server", + code="MCP_SERVER_REFRESH_DISABLED", + data={"server_id": str(server_id)}, + ) return await self._sync_server_tools(server) @@ -369,7 +373,11 @@ async def _sync_server_tools(self, server: McpServer) -> List[ToolInfo]: if not result.success: await self._server_service.update_connection_status(server.id, McpConnectionStatus.ERROR, result.error) - raise Exception(result.error) + raise ServiceUnavailableError( + "Failed to connect to MCP server", + code="MCP_SERVER_CONNECTION_FAILED", + data={"server_id": str(server.id), "server_name": server.name, "detail": result.error}, + ) # Unregister old tools await self._unregister_server_tools(server) @@ -594,7 +602,11 @@ async def initialize_mcp_tools_on_startup( f"{result.error}" ) if not allow_partial_failure: - raise Exception(f"Failed to load tools from MCP server {server.name}: {result.error}") + raise ServiceUnavailableError( + "Failed to load tools from MCP server during startup", + code="MCP_STARTUP_SYNC_FAILED", + data={"server_id": str(server.id), "server_name": server.name, "detail": result.error}, + ) break except Exception as e: diff --git a/backend/app/services/trace_service.py b/backend/app/services/trace_service.py deleted file mode 100644 index 728c75041..000000000 --- a/backend/app/services/trace_service.py +++ /dev/null @@ -1,263 +0,0 @@ -""" -Execution trace service. - -Encapsulate CRUD operations for ExecutionTrace / ExecutionObservation. 
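list_thread_events above returns one page plus the total count, with after_id acting as a created_at cursor. A sketch of draining a thread with that cursor:

import uuid

from app.services.thread_service import ThreadService


async def drain_thread_events(db, thread_id: uuid.UUID) -> list[dict]:
    service = ThreadService(db)
    events: list[dict] = []
    cursor = None
    while True:
        page, total = await service.list_thread_events(thread_id, after_id=cursor, limit=200)
        events.extend(page)
        if not page or len(events) >= total:
            return events
        cursor = page[-1]["id"]  # the filter compares this row's created_at

Note the filter is strictly greater-than on created_at, so an event sharing the cursor row's timestamp could be skipped; adding an id tiebreaker to the order and filter would make the cursor fully stable.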
-""" - -import uuid -from datetime import datetime, timezone -from typing import Optional - -from sqlalchemy import func, select -from sqlalchemy.orm import selectinload - -from app.models.execution_trace import ( - ExecutionObservation, - ExecutionTrace, - ObservationLevel, - ObservationStatus, - ObservationType, - TraceStatus, -) -from app.services.base import BaseService - - -class TraceService(BaseService): - """Execution trace service.""" - - # ==================== Query Helpers ==================== - - @staticmethod - def _apply_trace_filters( - stmt, - *, - graph_id: Optional[uuid.UUID] = None, - workspace_id: Optional[uuid.UUID] = None, - thread_id: Optional[str] = None, - ): - if graph_id is not None: - stmt = stmt.where(ExecutionTrace.graph_id == graph_id) - if workspace_id is not None: - stmt = stmt.where(ExecutionTrace.workspace_id == workspace_id) - if thread_id is not None: - stmt = stmt.where(ExecutionTrace.thread_id == thread_id) - return stmt - - # ==================== Trace CRUD ==================== - - async def create_trace( - self, - *, - trace_id: Optional[uuid.UUID] = None, - workspace_id: Optional[uuid.UUID] = None, - graph_id: Optional[uuid.UUID] = None, - thread_id: Optional[str] = None, - user_id: Optional[str] = None, - name: Optional[str] = None, - input_data: Optional[dict] = None, - metadata: Optional[dict] = None, - tags: Optional[list] = None, - ) -> ExecutionTrace: - """Create a new execution trace.""" - trace = ExecutionTrace( - id=trace_id or uuid.uuid4(), - workspace_id=workspace_id, - graph_id=graph_id, - thread_id=thread_id, - user_id=user_id, - name=name, - status=TraceStatus.RUNNING, - input=input_data, - metadata_=metadata, - tags=tags, - start_time=datetime.now(timezone.utc), - ) - self.db.add(trace) - await self.db.flush() - return trace - - async def complete_trace( - self, - trace_id: uuid.UUID, - *, - status: TraceStatus = TraceStatus.COMPLETED, - output: Optional[dict] = None, - total_tokens: Optional[int] = None, - total_cost: Optional[float] = None, - ) -> Optional[ExecutionTrace]: - """Complete an execution trace.""" - result = await self.db.execute(select(ExecutionTrace).where(ExecutionTrace.id == trace_id)) - trace = result.scalar_one_or_none() - if trace is None: - return None - - now = datetime.now(timezone.utc) - trace.status = status - trace.output = output - trace.end_time = now - trace.duration_ms = int((now - trace.start_time).total_seconds() * 1000) - trace.total_tokens = total_tokens - trace.total_cost = total_cost - trace.updated_at = now - await self.db.flush() - return trace - - async def get_trace(self, trace_id: uuid.UUID) -> Optional[ExecutionTrace]: - """Get a single Trace (with observations).""" - result = await self.db.execute( - select(ExecutionTrace) - .options(selectinload(ExecutionTrace.observations)) - .where(ExecutionTrace.id == trace_id) - ) - return result.scalar_one_or_none() - - async def list_traces( - self, - *, - graph_id: Optional[uuid.UUID] = None, - workspace_id: Optional[uuid.UUID] = None, - thread_id: Optional[str] = None, - limit: int = 20, - offset: int = 0, - ) -> list[ExecutionTrace]: - """List traces (without observation details, to reduce overhead).""" - stmt = select(ExecutionTrace).order_by(ExecutionTrace.start_time.desc()) - stmt = self._apply_trace_filters(stmt, graph_id=graph_id, workspace_id=workspace_id, thread_id=thread_id) - - stmt = stmt.limit(limit).offset(offset) - result = await self.db.execute(stmt) - return list(result.scalars().all()) - - async def count_traces( - self, - *, - 
graph_id: Optional[uuid.UUID] = None, - workspace_id: Optional[uuid.UUID] = None, - thread_id: Optional[str] = None, - ) -> int: - """Count traces matching the given filters (for pagination total).""" - stmt = select(func.count()).select_from(ExecutionTrace) - stmt = self._apply_trace_filters(stmt, graph_id=graph_id, workspace_id=workspace_id, thread_id=thread_id) - result = await self.db.execute(stmt) - total = result.scalar_one() - return int(total or 0) - - # ==================== Observation CRUD ==================== - - async def create_observation( - self, - *, - observation_id: Optional[uuid.UUID] = None, - trace_id: uuid.UUID, - parent_observation_id: Optional[uuid.UUID] = None, - type: ObservationType, - name: Optional[str] = None, - level: ObservationLevel = ObservationLevel.DEFAULT, - start_time: Optional[datetime] = None, - input_data: Optional[dict] = None, - model_name: Optional[str] = None, - model_provider: Optional[str] = None, - model_parameters: Optional[dict] = None, - metadata: Optional[dict] = None, - ) -> ExecutionObservation: - """Create a new Observation.""" - obs = ExecutionObservation( - id=observation_id or uuid.uuid4(), - trace_id=trace_id, - parent_observation_id=parent_observation_id, - type=type, - name=name, - level=level, - start_time=start_time or datetime.now(timezone.utc), - input=input_data, - model_name=model_name, - model_provider=model_provider, - model_parameters=model_parameters, - metadata_=metadata, - ) - self.db.add(obs) - await self.db.flush() - return obs - - async def complete_observation( - self, - observation_id: uuid.UUID, - *, - output: Optional[dict] = None, - level: Optional[ObservationLevel] = None, - status_message: Optional[str] = None, - prompt_tokens: Optional[int] = None, - completion_tokens: Optional[int] = None, - total_tokens: Optional[int] = None, - input_cost: Optional[float] = None, - output_cost: Optional[float] = None, - total_cost: Optional[float] = None, - completion_start_time: Optional[datetime] = None, - ) -> Optional[ExecutionObservation]: - """Complete an Observation.""" - result = await self.db.execute(select(ExecutionObservation).where(ExecutionObservation.id == observation_id)) - obs = result.scalar_one_or_none() - if obs is None: - return None - - now = datetime.now(timezone.utc) - obs.end_time = now - obs.duration_ms = int((now - obs.start_time).total_seconds() * 1000) - obs.status = ObservationStatus.FAILED if (level == ObservationLevel.ERROR) else ObservationStatus.COMPLETED - if output is not None: - obs.output = output - if level is not None: - obs.level = level - if status_message is not None: - obs.status_message = status_message - if prompt_tokens is not None: - obs.prompt_tokens = prompt_tokens - if completion_tokens is not None: - obs.completion_tokens = completion_tokens - if total_tokens is not None: - obs.total_tokens = total_tokens - if input_cost is not None: - obs.input_cost = input_cost - if output_cost is not None: - obs.output_cost = output_cost - if total_cost is not None: - obs.total_cost = total_cost - if completion_start_time is not None: - obs.completion_start_time = completion_start_time - - await self.db.flush() - return obs - - async def get_observations_for_trace(self, trace_id: uuid.UUID) -> list[ExecutionObservation]: - """Get all Observations for a Trace (flat list, sorted by time).""" - result = await self.db.execute( - select(ExecutionObservation) - .where(ExecutionObservation.trace_id == trace_id) - .order_by(ExecutionObservation.start_time.asc()) - ) - return 
list(result.scalars().all()) - - # ==================== Batch operations ==================== - - async def batch_create_trace_with_observations( - self, - trace: ExecutionTrace, - observations: list[ExecutionObservation], - ) -> ExecutionTrace: - """Batch-create a Trace and all its Observations (single commit).""" - self.db.add(trace) - for obs in observations: - self.db.add(obs) - await self.db.flush() - return trace - - async def aggregate_trace_tokens(self, trace_id: uuid.UUID) -> tuple[int, float]: - """Aggregate tokens and cost across all GENERATION Observations under a Trace.""" - observations = await self.get_observations_for_trace(trace_id) - total_tokens = 0 - total_cost = 0.0 - for obs in observations: - if obs.type == ObservationType.GENERATION: - total_tokens += obs.total_tokens or 0 - total_cost += obs.total_cost or 0.0 - return total_tokens, total_cost diff --git a/backend/app/services/user_service.py b/backend/app/services/user_service.py index c94089415..f0a806428 100644 --- a/backend/app/services/user_service.py +++ b/backend/app/services/user_service.py @@ -8,7 +8,7 @@ from sqlalchemy.ext.asyncio import AsyncSession -from app.common.exceptions import BadRequestException, NotFoundException +from app.common.app_errors import InvalidRequestError, NotFoundError from app.models.auth import AuthUser as User from app.repositories.user import UserRepository @@ -60,7 +60,11 @@ async def create_user( Note: does not include password setup; password operations live in AuthService. """ if await self.user_repo.email_exists(email): - raise BadRequestException("Email already registered") + raise InvalidRequestError( + "Email already registered", + code="USER_ALREADY_EXISTS", + data={"email": email}, + ) user_data = { "name": name, @@ -91,7 +95,11 @@ async def update_user( user.name = name if email is not None: if email != user.email and await self.user_repo.email_exists(email, exclude_id=user.id): - raise BadRequestException("Email already registered") + raise InvalidRequestError( + "Email already registered", + code="USER_ALREADY_EXISTS", + data={"email": email}, + ) user.email = email if image is not None: user.image = image @@ -115,7 +123,7 @@ async def delete_user(self, user_id: str) -> bool: """Delete a user.""" user = await self.user_repo.get_by_id(user_id) if not user: - raise NotFoundException("User not found") + raise NotFoundError("User not found", code="USER_NOT_FOUND", data={"user_id": user_id}) import uuid as uuid_lib diff --git a/backend/app/services/workspace_file_service.py b/backend/app/services/workspace_file_service.py deleted file mode 100644 index 8178ae82c..000000000 --- a/backend/app/services/workspace_file_service.py +++ /dev/null @@ -1,275 +0,0 @@ -""" -Workspace file storage service. 
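The user_service hunk above shows the pattern this PR applies throughout: bare BadRequestException/NotFoundException become coded errors carrying a machine-readable code plus a data payload. app/common/app_errors.py itself is outside this section; a rough sketch of the shape implied by the call sites, not the actual module:

from typing import Any, Optional


class AppError(Exception):
    # Sketch only; the real base likely carries more (source, retryable, ...)
    def __init__(self, message: str, *, code: str, data: Optional[dict[str, Any]] = None):
        super().__init__(message)
        self.message = message
        self.code = code
        self.data = data or {}


class InvalidRequestError(AppError):
    http_status = 400


class NotFoundError(AppError):
    http_status = 404

A single exception handler can then map http_status and serialize code/data uniformly, which is what the structured raises throughout this diff are buying.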
-""" - -from __future__ import annotations - -import asyncio -import secrets -import uuid -from datetime import datetime, timedelta, timezone -from pathlib import Path -from typing import Dict, List, Optional - -from fastapi import UploadFile -from jose import JWTError, jwt - -from app.common.exceptions import BadRequestException, ConflictException, ForbiddenException, NotFoundException -from app.core.settings import settings -from app.models.access_control import PermissionType -from app.models.auth import AuthUser as User -from app.models.workspace import WorkspaceMemberRole -from app.repositories.workspace import WorkspaceMemberRepository, WorkspaceRepository -from app.repositories.workspace_file import WorkspaceStoredFileRepository -from app.utils.path_utils import sanitize_filename - -from .base import BaseService - - -class WorkspaceFileService(BaseService): - """Workspace file business logic.""" - - CONTEXT = WorkspaceStoredFileRepository.CONTEXT_WORKSPACE - MAX_FILE_SIZE_BYTES = 100 * 1024 * 1024 # 100MB per-file limit - DEFAULT_STORAGE_LIMIT_BYTES = 5 * 1024 * 1024 * 1024 # 5GB simple quota (adjustable / billable) - DOWNLOAD_TOKEN_EXPIRE_MINUTES = 15 - - def __init__(self, db): - super().__init__(db) - self.workspace_repo = WorkspaceRepository(db) - self.member_repo = WorkspaceMemberRepository(db) - self.file_repo = WorkspaceStoredFileRepository(db) - - # ------------------------------------------------------------------ # - # internal utilities - # ------------------------------------------------------------------ # - def _storage_root(self) -> Path: - """Unified file storage root directory.""" - return Path(settings.WORKSPACE_ROOT) / "workspace_files" - - def _sanitize_filename(self, filename: str) -> str: - """Sanitize filename to prevent path traversal. - - Use the unified sanitize_filename utility function. 
- """ - return sanitize_filename(filename or "unnamed") - - def _generate_key(self, workspace_id: uuid.UUID, filename: str) -> str: - """Generate a storage key: workspace//--.""" - timestamp = int(datetime.now(timezone.utc).timestamp() * 1000) - random_part = secrets.token_hex(4) - safe_name = self._sanitize_filename(filename).replace(" ", "-") - return f"workspace/{workspace_id}/{timestamp}-{random_part}-{safe_name}" - - def _build_serve_path(self, workspace_id: uuid.UUID, file_id: uuid.UUID) -> str: - """Generate the external file access path (without signature).""" - return f"/api/workspaces/{workspace_id}/files/{file_id}/serve" - - async def _write_file(self, path: Path, content: bytes) -> None: - path.parent.mkdir(parents=True, exist_ok=True) - await asyncio.to_thread(path.write_bytes, content) - - def _get_permission_by_role(self, role: WorkspaceMemberRole | None) -> PermissionType: - if role in (WorkspaceMemberRole.owner, WorkspaceMemberRole.admin, WorkspaceMemberRole.member): - return PermissionType.write - return PermissionType.read - - def _check_permission(self, required: PermissionType, actual: PermissionType) -> None: - if required == PermissionType.read: - return - if required == PermissionType.write and actual != PermissionType.write: - raise ForbiddenException("Write permission required") - - async def _ensure_member_role(self, workspace_id: uuid.UUID, user: User) -> WorkspaceMemberRole: - workspace = await self.workspace_repo.get(workspace_id) - if not workspace: - raise NotFoundException("Workspace not found") - - if user.is_superuser or workspace.owner_id == user.id: - return WorkspaceMemberRole.owner - - member = await self.member_repo.get_member(workspace_id, user.id) - if not member: - raise ForbiddenException("No access to workspace") - return member.role # type: ignore - - def _token_payload(self, workspace_id: uuid.UUID, file_id: uuid.UUID, user_id: uuid.UUID) -> Dict: - now = datetime.now(timezone.utc) - return { - "sub": str(user_id), - "workspace_id": str(workspace_id), - "file_id": str(file_id), - "type": "workspace_file", - "iat": now, - "exp": now + timedelta(minutes=self.DOWNLOAD_TOKEN_EXPIRE_MINUTES), - } - - def _validate_download_token(self, token: str, workspace_id: uuid.UUID, file_id: uuid.UUID) -> Optional[str]: - try: - payload = jwt.decode( - token, - settings.secret_key, - algorithms=[settings.algorithm], - options={"verify_aud": False}, - ) - if payload.get("type") != "workspace_file": - return None - if payload.get("workspace_id") != str(workspace_id): - return None - if payload.get("file_id") != str(file_id): - return None - sub = payload.get("sub") - return str(sub) if sub is not None else None - except JWTError: - return None - - def _file_path_from_key(self, key: str) -> Path: - return self._storage_root() / key - - def get_file_path(self, record) -> Path: - """Get the absolute file path.""" - return self._file_path_from_key(record.key) - - def _serialize_file(self, record) -> Dict: - serve_path = self._build_serve_path(record.workspace_id, record.id) - return { - "id": str(record.id), - "workspaceId": str(record.workspace_id) if record.workspace_id else None, - "name": record.original_name, - "key": record.key, - "path": serve_path, - "url": serve_path, - "size": record.size, - "type": record.content_type, - "uploadedBy": str(record.user_id), - "uploadedAt": record.uploaded_at, - } - - # ------------------------------------------------------------------ # - # public methods - # 
------------------------------------------------------------------ # - async def list_files(self, workspace_id: uuid.UUID, current_user: User) -> List[Dict]: - role = await self._ensure_member_role(workspace_id, current_user) - self._check_permission(PermissionType.read, self._get_permission_by_role(role)) - - records = await self.file_repo.list_workspace_files(workspace_id) - return [self._serialize_file(rec) for rec in records] - - async def upload_file(self, workspace_id: uuid.UUID, file: UploadFile, current_user: User) -> Dict: - role = await self._ensure_member_role(workspace_id, current_user) - self._check_permission(PermissionType.write, self._get_permission_by_role(role)) - - content = await file.read() - size = len(content) - if size == 0: - raise BadRequestException("File is empty") - if size > self.MAX_FILE_SIZE_BYTES: - raise BadRequestException("File exceeds size limit (100MB)") - - original_name = self._sanitize_filename(file.filename or "unnamed") - exists = await self.file_repo.find_by_name(workspace_id, original_name) - if exists: - # aligned with legacy project: duplicate file returns 409 with isDuplicate flag - raise ConflictException( - f'A file named "{original_name}" already exists in this workspace', - data={"isDuplicate": True}, - ) - - # simple quota check - current_usage = await self.file_repo.sum_user_usage(current_user.id) - if current_usage + size > self.DEFAULT_STORAGE_LIMIT_BYTES: - raise ForbiddenException("Storage limit exceeded") - - key = self._generate_key(workspace_id, original_name) - path = self._file_path_from_key(key) - - await self._write_file(path, content) - - record = await self.file_repo.create( - { - "key": key, - "user_id": current_user.id, - "workspace_id": workspace_id, - "context": self.CONTEXT, - "original_name": original_name, - "content_type": file.content_type or "application/octet-stream", - "size": size, - } - ) - await self.commit() - - return self._serialize_file(record) - - async def delete_file(self, workspace_id: uuid.UUID, file_id: uuid.UUID, current_user: User) -> None: - role = await self._ensure_member_role(workspace_id, current_user) - self._check_permission(PermissionType.write, self._get_permission_by_role(role)) - - record = await self.file_repo.get_by_id_and_workspace(file_id, workspace_id) - if not record: - raise NotFoundException("File not found") - - file_path = self._file_path_from_key(record.key) - if file_path.exists(): - await asyncio.to_thread(file_path.unlink) - - await self.file_repo.delete(record.id) - await self.commit() - - async def generate_download_url(self, workspace_id: uuid.UUID, file_id: uuid.UUID, current_user: User) -> str: - role = await self._ensure_member_role(workspace_id, current_user) - self._check_permission(PermissionType.read, self._get_permission_by_role(role)) - - record = await self.file_repo.get_by_id_and_workspace(file_id, workspace_id) - if not record: - raise NotFoundException("File not found") - - import uuid as uuid_lib - - file_uuid = file_id if isinstance(file_id, uuid.UUID) else uuid_lib.UUID(str(file_id)) - user_uuid = current_user.id if isinstance(current_user.id, uuid.UUID) else uuid_lib.UUID(str(current_user.id)) - token = jwt.encode( - self._token_payload(workspace_id, file_uuid, user_uuid), - settings.secret_key, - algorithm=settings.algorithm, - ) - return f"{self._build_serve_path(workspace_id, file_id)}?token={token}" - - async def get_file_record(self, workspace_id: uuid.UUID, file_id: uuid.UUID): - record = await self.file_repo.get_by_id_and_workspace(file_id, 
workspace_id) - if not record: - raise NotFoundException("File not found") - return record - - async def read_file_bytes(self, record) -> bytes: - file_path = self._file_path_from_key(record.key) - if not file_path.exists(): - raise NotFoundException("File content missing") - return await asyncio.to_thread(file_path.read_bytes) - - async def validate_token_or_user( - self, - workspace_id: uuid.UUID, - file_id: uuid.UUID, - token: Optional[str], - current_user: Optional[User], - ) -> None: - if token: - user_sub = self._validate_download_token(token, workspace_id, file_id) - if not user_sub: - raise ForbiddenException("Invalid or expired download token") - return - - if not current_user: - raise ForbiddenException("Authentication required") - role = await self._ensure_member_role(workspace_id, current_user) - self._check_permission(PermissionType.read, self._get_permission_by_role(role)) - - async def get_user_storage_usage(self, user: User) -> Dict: - used = await self.file_repo.sum_user_usage(user.id) - limit_bytes = self.DEFAULT_STORAGE_LIMIT_BYTES - percent_used = (used / limit_bytes * 100) if limit_bytes else 0 - return { - "usedBytes": used, - "limitBytes": limit_bytes, - "percentUsed": percent_used, - } diff --git a/backend/app/services/workspace_folder_service.py b/backend/app/services/workspace_folder_service.py deleted file mode 100644 index a2575a2ff..000000000 --- a/backend/app/services/workspace_folder_service.py +++ /dev/null @@ -1,357 +0,0 @@ -""" -Folder business logic. -""" - -from __future__ import annotations - -import uuid -from datetime import datetime, timezone -from typing import Dict, List, Optional, Set, Tuple - -from sqlalchemy import select - -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException -from app.models.auth import AuthUser as User -from app.models.workspace import WorkspaceFolder, WorkspaceMemberRole -from app.repositories.workspace import WorkspaceMemberRepository, WorkspaceRepository -from app.repositories.workspace_folder import WorkflowFolderRepository - -from .base import BaseService - -# max folder depth limit: only two levels (root depth is 0, first level depth is 1) -MAX_FOLDER_DEPTH = 1 - - -class FolderService(BaseService[WorkspaceFolder]): - """Folder service.""" - - def __init__(self, db): - super().__init__(db) - self.folder_repo = WorkflowFolderRepository(db) - self.workspace_repo = WorkspaceRepository(db) - self.member_repo = WorkspaceMemberRepository(db) - - # ------------------------------------------------------------------ # - # permission checks - # ------------------------------------------------------------------ # - async def _get_member_role(self, workspace_id: uuid.UUID, current_user: User) -> Optional[str]: - workspace = await self.workspace_repo.get(workspace_id) - if not workspace: - raise NotFoundException("Workspace not found") - if current_user.is_superuser or workspace.owner_id == current_user.id: - return WorkspaceMemberRole.owner - member = await self.member_repo.get_member(workspace_id, current_user.id) - return member.role if member else None - - async def _ensure_permission( - self, - workspace_id: uuid.UUID, - current_user: User, - required: str, - ) -> str: - role = await self._get_member_role(workspace_id, current_user) - if role is None: - raise ForbiddenException("No access to workspace") - - if required == "read": - return role - - if required == "write" and role in { - WorkspaceMemberRole.owner, - WorkspaceMemberRole.admin, - WorkspaceMemberRole.member, - }: - return role 
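Back in workspace_file_service above, the serve endpoint accepts either a session user or a short-lived signed token (validate_token_or_user). A round-trip sketch with python-jose, where SECRET and ALGORITHM stand in for settings.secret_key and settings.algorithm:

import uuid
from datetime import datetime, timedelta, timezone

from jose import JWTError, jwt

SECRET = "change-me"  # stands in for settings.secret_key
ALGORITHM = "HS256"   # stands in for settings.algorithm


def issue_download_token(workspace_id: uuid.UUID, file_id: uuid.UUID, user_id: uuid.UUID) -> str:
    now = datetime.now(timezone.utc)
    payload = {
        "sub": str(user_id),
        "workspace_id": str(workspace_id),
        "file_id": str(file_id),
        "type": "workspace_file",
        "iat": now,
        "exp": now + timedelta(minutes=15),
    }
    return jwt.encode(payload, SECRET, algorithm=ALGORITHM)


def check_download_token(token: str, workspace_id: uuid.UUID, file_id: uuid.UUID) -> bool:
    try:
        payload = jwt.decode(token, SECRET, algorithms=[ALGORITHM])  # exp is verified by default
    except JWTError:
        return False
    return (
        payload.get("type") == "workspace_file"
        and payload.get("workspace_id") == str(workspace_id)
        and payload.get("file_id") == str(file_id)
    )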
- - if required == "admin" and role in {WorkspaceMemberRole.owner, WorkspaceMemberRole.admin}: - return role - - raise ForbiddenException("Insufficient workspace permission") - - # ------------------------------------------------------------------ # - # queries - # ------------------------------------------------------------------ # - async def list_folders(self, workspace_id: uuid.UUID, *, current_user: User) -> List[WorkspaceFolder]: - await self._ensure_permission(workspace_id, current_user, "read") - result = await self.folder_repo.list_by_workspace(workspace_id) - return list(result) if result is not None else [] - - # ------------------------------------------------------------------ # - # tree/cycle detection helpers - # ------------------------------------------------------------------ # - async def _build_children_index(self, workspace_id: uuid.UUID) -> Dict[Optional[uuid.UUID], List[uuid.UUID]]: - relations = await self.folder_repo.list_relations_by_workspace(workspace_id) - children: Dict[Optional[uuid.UUID], List[uuid.UUID]] = {} - for fid, pid in relations: - children.setdefault(pid, []).append(fid) - return children - - async def _collect_subtree_ids(self, workspace_id: uuid.UUID, root_id: uuid.UUID) -> List[uuid.UUID]: - """ - Return root_id and all its descendant folder IDs (BFS), for deletion/duplication. - Only traverse within the workspace to avoid cross-workspace parentId pollution. - """ - children_index = await self._build_children_index(workspace_id) - queue: List[uuid.UUID] = [root_id] - visited: Set[uuid.UUID] = set() - out: List[uuid.UUID] = [] - - while queue: - current = queue.pop(0) - if current in visited: - continue - visited.add(current) - out.append(current) - for child_id in children_index.get(current, []): - if child_id not in visited: - queue.append(child_id) - - return out - - async def _would_create_cycle( - self, - *, - workspace_id: uuid.UUID, - folder_id: uuid.UUID, - new_parent_id: uuid.UUID, - ) -> bool: - """Check whether a cycle would be created.""" - seen: Set[uuid.UUID] = set() - current: Optional[uuid.UUID] = new_parent_id - - while current is not None: - if current == folder_id: - return True - if current in seen: - return True - seen.add(current) - - node = await self.folder_repo.ensure_same_workspace(current, workspace_id) - current = node.parent_id - - return False - - async def _calculate_depth(self, folder_id: uuid.UUID, workspace_id: uuid.UUID) -> int: - """ - Calculate the folder depth (starting from root, root depth is 0). 
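_would_create_cycle above walks the parent chain from the proposed parent; hitting the folder itself (or a repeated node) means the move would loop. The same check as a self-contained sketch over an in-memory parent map, standing in for the repository lookups:

import uuid
from typing import Optional


def would_create_cycle(
    parents: dict[uuid.UUID, Optional[uuid.UUID]],  # folder_id -> parent_id
    folder_id: uuid.UUID,
    new_parent_id: uuid.UUID,
) -> bool:
    seen: set[uuid.UUID] = set()
    current: Optional[uuid.UUID] = new_parent_id
    while current is not None:
        if current == folder_id or current in seen:
            return True  # moving under a descendant (or a corrupted chain) would loop
        seen.add(current)
        current = parents.get(current)
    return False


# Example: b is a child of a; re-parenting a under b must be rejected.
a, b = uuid.uuid4(), uuid.uuid4()
assert would_create_cycle({a: None, b: a}, folder_id=a, new_parent_id=b)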
- """ - depth = 0 - current_id: Optional[uuid.UUID] = folder_id - - while current_id is not None: - folder = await self.folder_repo.ensure_same_workspace(current_id, workspace_id) - if folder.parent_id is None: - break - depth += 1 - current_id = folder.parent_id - if depth > MAX_FOLDER_DEPTH: - break - - return depth - - async def _check_depth_limit(self, parent_id: Optional[uuid.UUID], workspace_id: uuid.UUID) -> None: - """Check whether creating a subfolder under the specified parent would exceed the depth limit.""" - if parent_id is None: - return - - parent_depth = await self._calculate_depth(parent_id, workspace_id) - if parent_depth >= MAX_FOLDER_DEPTH: - raise BadRequestException(f"Maximum folder depth ({MAX_FOLDER_DEPTH + 1}) would be exceeded") - - # ------------------------------------------------------------------ # - # create - # ------------------------------------------------------------------ # - async def create_folder( - self, - *, - workspace_id: uuid.UUID, - current_user: User, - name: str, - color: Optional[str] = None, - parent_id: Optional[uuid.UUID] = None, - is_expanded: bool = False, - ) -> WorkspaceFolder: - await self._ensure_permission(workspace_id, current_user, "write") - await self._check_depth_limit(parent_id, workspace_id) - - if parent_id: - await self.folder_repo.ensure_same_workspace(parent_id, workspace_id) - - next_sort = (await self.folder_repo.max_sort_order(workspace_id, parent_id)) + 1 - folder = await self.folder_repo.create( - { - "name": name.strip(), - "user_id": current_user.id, - "workspace_id": workspace_id, - "parent_id": parent_id, - "color": color or "#6B7280", - "is_expanded": is_expanded, - "sort_order": next_sort, - } - ) - await self.commit() - result = folder - return result # type: ignore - - # ------------------------------------------------------------------ # - # update - # ------------------------------------------------------------------ # - async def update_folder( - self, - folder_id: uuid.UUID, - *, - workspace_id: uuid.UUID, - current_user: User, - name: Optional[str] = None, - color: Optional[str] = None, - is_expanded: Optional[bool] = None, - parent_id: Optional[uuid.UUID] = None, - ) -> WorkspaceFolder: - await self._ensure_permission(workspace_id, current_user, "write") - folder = await self.folder_repo.ensure_same_workspace(folder_id, workspace_id) - - if parent_id is not None: - if parent_id == folder.id: - raise BadRequestException("Folder cannot be its own parent") - if parent_id: - await self.folder_repo.ensure_same_workspace(parent_id, workspace_id) - if await self._would_create_cycle( - workspace_id=workspace_id, folder_id=folder.id, new_parent_id=parent_id - ): - raise BadRequestException("Cannot create circular folder reference") - await self._check_depth_limit(parent_id, workspace_id) - - update_data: Dict[str, object] = {} - if name is not None: - update_data["name"] = name.strip() - if color is not None: - update_data["color"] = color - if is_expanded is not None: - update_data["is_expanded"] = is_expanded - if parent_id is not None: - update_data["parent_id"] = parent_id - - if update_data: - folder = await self.folder_repo.update(folder.id, update_data) # type: ignore - await self.commit() - result = folder - return result # type: ignore - - # ------------------------------------------------------------------ # - # delete - # ------------------------------------------------------------------ # - async def delete_folder( - self, - folder_id: uuid.UUID, - *, - workspace_id: uuid.UUID, - current_user: User, 
- ) -> int: - stats = await self.delete_folder_tree( - folder_id, - workspace_id=workspace_id, - current_user=current_user, - ) - return stats["folders"] - - async def delete_folder_tree( - self, - folder_id: uuid.UUID, - *, - workspace_id: uuid.UUID, - current_user: User, - ) -> Dict[str, int]: - """Recursively soft-delete the entire folder subtree; requires write permission (member and above).""" - await self._ensure_permission(workspace_id, current_user, "write") - await self.folder_repo.ensure_same_workspace(folder_id, workspace_id) - - target_ids = await self._collect_subtree_ids(workspace_id, folder_id) - deleted_at = datetime.now(timezone.utc) - for folder_id_to_delete in target_ids: - await self.folder_repo.update(folder_id_to_delete, {"deleted_at": deleted_at}) - - await self.commit() - return {"folders": len(target_ids), "workflows": 0} - - # ------------------------------------------------------------------ # - # duplicate - # ------------------------------------------------------------------ # - async def duplicate_folder( - self, - folder_id: uuid.UUID, - *, - workspace_id: uuid.UUID, - current_user: User, - name: Optional[str] = None, - parent_id: Optional[uuid.UUID] = None, - color: Optional[str] = None, - ) -> WorkspaceFolder: - source = await self.folder_repo.get(folder_id) - if not source: - raise NotFoundException("Folder not found") - - await self._ensure_permission(source.workspace_id, current_user, "read") - await self._ensure_permission(workspace_id, current_user, "write") - - effective_parent_id: Optional[uuid.UUID] - if parent_id is not None: - effective_parent_id = parent_id - else: - effective_parent_id = source.parent_id if workspace_id == source.workspace_id else None - - if effective_parent_id: - await self.folder_repo.ensure_same_workspace(effective_parent_id, workspace_id) - - source_subtree_ids = await self._collect_subtree_ids(source.workspace_id, source.id) - folders_result = await self.db.execute( - select(WorkspaceFolder).where(WorkspaceFolder.id.in_(source_subtree_ids)) - ) - source_folders = list(folders_result.scalars().all()) - source_by_id: Dict[uuid.UUID, WorkspaceFolder] = {f.id: f for f in source_folders} - folder_id_map: Dict[uuid.UUID, uuid.UUID] = {fid: uuid.uuid4() for fid in source_subtree_ids} - - async with self.db.begin(): - new_root_id = folder_id_map[source.id] - new_root = WorkspaceFolder( - id=new_root_id, - name=(name or f"{source.name} Copy").strip(), - user_id=current_user.id, - workspace_id=workspace_id, - parent_id=effective_parent_id, - color=color or source.color, - is_expanded=False, - sort_order=source.sort_order, - ) - self.db.add(new_root) - await self.db.flush() - - children_index = await self._build_children_index(source.workspace_id) - queue: List[Tuple[uuid.UUID, uuid.UUID]] = [(source.id, new_root_id)] - - while queue: - old_parent_id, new_parent_id = queue.pop(0) - for old_child_id in children_index.get(old_parent_id, []): - if old_child_id not in source_by_id: - continue - old_child = source_by_id[old_child_id] - new_child_id = folder_id_map[old_child_id] - - self.db.add( - WorkspaceFolder( - id=new_child_id, - name=old_child.name.strip(), - user_id=current_user.id, - workspace_id=workspace_id, - parent_id=new_parent_id, - color=old_child.color, - is_expanded=False, - sort_order=old_child.sort_order, - ) - ) - queue.append((old_child_id, new_child_id)) - - await self.db.flush() - - await self.commit() - result = await self.folder_repo.get(new_root_id) - return result # type: ignore diff --git 
a/backend/app/services/workspace_service.py b/backend/app/services/workspace_service.py index f6fc4930d..3d2cea474 100644 --- a/backend/app/services/workspace_service.py +++ b/backend/app/services/workspace_service.py @@ -5,7 +5,7 @@ import uuid from typing import Any, Dict, List, Optional -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException +from app.common.app_errors import AccessDeniedError, InvalidRequestError, NotFoundError from app.common.pagination import PageResult, PaginationParams from app.models.auth import AuthUser as User from app.models.workspace import Workspace, WorkspaceMemberRole, WorkspaceType @@ -59,12 +59,16 @@ async def _get_role(self, workspace: Workspace, current_user: User) -> Workspace async def _ensure_member(self, workspace_id: uuid.UUID, current_user: User) -> WorkspaceMemberRole: workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) if current_user.is_superuser or workspace.owner_id == current_user.id: return WorkspaceMemberRole.owner member = await self.member_repo.get_member(workspace_id, current_user.id) if not member: - raise ForbiddenException("No access to workspace") + raise AccessDeniedError("No access to workspace", code="WORKSPACE_ACCESS_DENIED") return member.role # type: ignore async def get_user_role(self, workspace_id: uuid.UUID, current_user: User) -> Optional[WorkspaceMemberRole]: @@ -77,13 +81,13 @@ async def get_user_role(self, workspace_id: uuid.UUID, current_user: User) -> Op try: # reuse the existing _ensure_member method, which already handles all cases (superuser, owner, regular member) return await self._ensure_member(workspace_id, current_user) - except (NotFoundException, ForbiddenException): + except (NotFoundError, AccessDeniedError): # if the user is not a member, return None instead of raising (this is a query method, not a validation method) return None def _ensure_admin_role(self, role: WorkspaceMemberRole): if role not in {WorkspaceMemberRole.owner, WorkspaceMemberRole.admin}: - raise ForbiddenException("Admin permission required") + raise AccessDeniedError("Admin permission required", code="WORKSPACE_PERMISSION_DENIED") async def list_workspaces(self, current_user: User) -> List[Dict]: workspaces = await self.workspace_repo.list_for_user(current_user.id) @@ -146,7 +150,11 @@ async def create_workspace( async def get_workspace(self, workspace_id: uuid.UUID, current_user: User) -> Dict: workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) await self._ensure_member(workspace_id, current_user) return await self._serialize_workspace(workspace, current_user) @@ -164,7 +172,11 @@ async def update_workspace( workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) update_data: Dict[str, Any] = {} if name is not None: @@ -202,10 +214,18 @@ async def delete_workspace( # check if it's a personal workspace; personal workspaces cannot be deleted workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise 
NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) if workspace.type == WorkspaceType.personal: - raise BadRequestException("Personal workspace cannot be deleted") + raise InvalidRequestError( + "Personal workspace cannot be deleted", + code="PERSONAL_WORKSPACE_DELETE_FORBIDDEN", + data={"workspace_id": str(workspace_id)}, + ) # Revoke all tokens bound to this workspace from app.services.platform_token_service import PlatformTokenService @@ -229,14 +249,22 @@ async def duplicate_workspace( # get source workspace source_workspace = await self.workspace_repo.get(workspace_id) if not source_workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) # ensure user has permission to access the source workspace await self._ensure_member(workspace_id, current_user) # check if it's a personal workspace; personal workspaces cannot be duplicated if source_workspace.type == WorkspaceType.personal: - raise BadRequestException("Personal workspace cannot be duplicated") + raise InvalidRequestError( + "Personal workspace cannot be duplicated", + code="PERSONAL_WORKSPACE_DUPLICATE_FORBIDDEN", + data={"workspace_id": str(workspace_id)}, + ) # generate new name new_name = name or f"{source_workspace.name} (Copy)" @@ -276,18 +304,22 @@ async def add_member( self._ensure_admin_role(member_role) if role not in WorkspaceMemberRole._value2member_map_: - raise BadRequestException("Invalid role") + raise InvalidRequestError("Invalid role", code="WORKSPACE_MEMBER_ROLE_INVALID", data={"role": role}) target_role = WorkspaceMemberRole(role) # owner role cannot be assigned via add member if target_role == WorkspaceMemberRole.owner: - raise BadRequestException("Cannot assign owner role") + raise InvalidRequestError("Cannot assign owner role", code="WORKSPACE_OWNER_ROLE_ASSIGNMENT_FORBIDDEN") # role hierarchy protection: non-owner cannot add a role >= their own if member_role != WorkspaceMemberRole.owner: if ROLE_RANK.get(target_role, 0) >= ROLE_RANK.get(member_role, 0): - raise ForbiddenException("Cannot add a member with a role equal to or higher than your own") + raise AccessDeniedError( + "Cannot add a member with a role equal to or higher than your own", + code="WORKSPACE_MEMBER_ROLE_TOO_HIGH", + data={"role": target_role.value}, + ) from app.repositories.auth_user import AuthUserRepository @@ -295,11 +327,15 @@ async def add_member( target_user = await user_repo.get_by_email(email.lower()) if not target_user: - raise NotFoundException("User not found") + raise NotFoundError("User not found", code="USER_NOT_FOUND", data={"email": email}) existing_member = await self.member_repo.get_member(workspace_id, target_user.id) if existing_member: - raise BadRequestException(f"User with email {email} is already a member of this workspace") + raise InvalidRequestError( + f"User with email {email} is already a member of this workspace", + code="WORKSPACE_MEMBER_ALREADY_EXISTS", + data={"email": email, "workspace_id": str(workspace_id)}, + ) await self.member_repo.create({"workspace_id": workspace_id, "user_id": target_user.id, "role": target_role}) await self.commit() @@ -328,7 +364,11 @@ async def list_members_paginated( workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", 
+ code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) await self._ensure_member(workspace_id, current_user) @@ -401,7 +441,11 @@ async def update_member_role( """Update member role.""" workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) # ensure current user is admin current_role = await self._ensure_member(workspace_id, current_user) @@ -410,28 +454,38 @@ async def update_member_role( # get target member target_member = await self.member_repo.get_member(workspace_id, target_user_id) if not target_member: - raise NotFoundException("User not found in workspace") + raise NotFoundError( + "User not found in workspace", + code="WORKSPACE_MEMBER_NOT_FOUND", + data={"user_id": str(target_user_id), "workspace_id": str(workspace_id)}, + ) # cannot modify the owner's role if workspace.owner_id == target_user_id: - raise BadRequestException("Cannot change owner role") + raise InvalidRequestError("Cannot change owner role", code="WORKSPACE_OWNER_ROLE_CHANGE_FORBIDDEN") # owner role cannot be assigned via role update if new_role == WorkspaceMemberRole.owner: - raise BadRequestException("Cannot assign owner role") + raise InvalidRequestError("Cannot assign owner role", code="WORKSPACE_OWNER_ROLE_ASSIGNMENT_FORBIDDEN") # role hierarchy protection: non-owner cannot modify members >= their own level if current_role != WorkspaceMemberRole.owner: if ROLE_RANK.get(target_member.role, 0) >= ROLE_RANK.get(current_role, 0): - raise ForbiddenException("Cannot modify a member with equal or higher role") + raise AccessDeniedError( + "Cannot modify a member with equal or higher role", code="WORKSPACE_MEMBER_ROLE_TOO_HIGH" + ) if ROLE_RANK.get(new_role, 0) >= ROLE_RANK.get(current_role, 0): - raise ForbiddenException("Cannot assign a role equal to or higher than your own") + raise AccessDeniedError( + "Cannot assign a role equal to or higher than your own", code="WORKSPACE_MEMBER_ROLE_TOO_HIGH" + ) # if modifying an admin, check if they are the last admin if target_member.role in {WorkspaceMemberRole.owner, WorkspaceMemberRole.admin}: admin_count = await self.member_repo.count_admins(workspace_id) if admin_count <= 1 and new_role not in {WorkspaceMemberRole.owner, WorkspaceMemberRole.admin}: - raise BadRequestException("Cannot remove the last admin from a workspace") + raise InvalidRequestError( + "Cannot remove the last admin from a workspace", code="WORKSPACE_LAST_ADMIN_REMOVE_FORBIDDEN" + ) # update role updated_member = await self.member_repo.update_member_role(workspace_id, target_user_id, new_role) @@ -474,16 +528,24 @@ async def remove_member( """ workspace = await self.workspace_repo.get(workspace_id) if not workspace: - raise NotFoundException("Workspace not found") + raise NotFoundError( + "Workspace not found", + code="WORKSPACE_NOT_FOUND", + data={"workspace_id": str(workspace_id)}, + ) # get target user's member record target_member = await self.member_repo.get_member(workspace_id, str(target_user_id)) if not target_member: - raise NotFoundException("User not found in workspace") + raise NotFoundError( + "User not found in workspace", + code="WORKSPACE_MEMBER_NOT_FOUND", + data={"user_id": str(target_user_id), "workspace_id": str(workspace_id)}, + ) # cannot remove the workspace owner if str(workspace.owner_id) == str(target_user_id): - raise BadRequestException("Cannot remove workspace 
owner") + raise InvalidRequestError("Cannot remove workspace owner", code="WORKSPACE_OWNER_REMOVE_FORBIDDEN") # get current user's role current_role = await self._get_role(workspace, current_user) @@ -491,19 +553,23 @@ async def remove_member( is_self = str(target_user_id) == current_user.id if not is_admin and not is_self: - raise ForbiddenException("Insufficient permissions") + raise AccessDeniedError("Insufficient permissions", code="WORKSPACE_PERMISSION_DENIED") # role hierarchy protection: non-owner cannot remove members >= their own level if is_admin and not is_self and current_role != WorkspaceMemberRole.owner: assert isinstance(current_role, WorkspaceMemberRole) if ROLE_RANK.get(target_member.role, 0) >= ROLE_RANK.get(current_role, 0): - raise ForbiddenException("Cannot remove a member with equal or higher role") + raise AccessDeniedError( + "Cannot remove a member with equal or higher role", code="WORKSPACE_MEMBER_ROLE_TOO_HIGH" + ) # if removing an admin/owner role member, check if they are the last admin if target_member.role in {WorkspaceMemberRole.owner, WorkspaceMemberRole.admin}: admin_count = await self.member_repo.count_admins(workspace_id) if admin_count <= 1: - raise BadRequestException("Cannot remove the last admin from a workspace") + raise InvalidRequestError( + "Cannot remove the last admin from a workspace", code="WORKSPACE_LAST_ADMIN_REMOVE_FORBIDDEN" + ) # execute deletion await self.member_repo.delete_member(workspace_id, str(target_user_id)) diff --git a/backend/app/utils/credentials.py b/backend/app/utils/credentials.py new file mode 100644 index 000000000..de8b81ff1 --- /dev/null +++ b/backend/app/utils/credentials.py @@ -0,0 +1,55 @@ +""" +Shared credential helpers for CLI agent containers. +""" + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from app.models.agent import Agent + +# Host env keys that should be passed through to CLI agent containers +PASSTHROUGH_ENV_KEYS = ( + "ANTHROPIC_API_KEY", + "OPENAI_API_KEY", + "AI_GATEWAY_BASE_URL", + "AI_GATEWAY_API_KEY", + "AI_GATEWAY_PROVIDER", + "AI_GATEWAY_MODEL", + "ANTHROPIC_BASE_URL", + "ANTHROPIC_AUTH_TOKEN", + "ANTHROPIC_MODEL", +) + + +def build_credentials(custom_env: dict[str, str] | None) -> dict[str, str]: + """Merge agent custom_env with host AI provider keys (agent overrides host). + + Raises ValueError if no Anthropic API key is found from any source. + """ + env: dict[str, str] = {} + for key in PASSTHROUGH_ENV_KEYS: + val = os.environ.get(key) + if val: + env[key] = val + if custom_env: + env.update(custom_env) + + has_key = env.get("ANTHROPIC_API_KEY") or env.get("ANTHROPIC_AUTH_TOKEN") + if not has_key: + raise ValueError( + "CLI agent requires an Anthropic API key. " + "Set ANTHROPIC_API_KEY or ANTHROPIC_AUTH_TOKEN in " + "the backend environment (.env) or in the agent's custom_env." + ) + return env + + +def build_agent_credentials(agent: Agent) -> dict[str, str]: + """Build credentials for an agent, merging its custom_env with host keys.""" + from app.core.model.utils import decrypt_credentials + + custom_env = decrypt_credentials(agent.encrypted_custom_env) if agent.encrypted_custom_env else None + return build_credentials(custom_env) diff --git a/backend/app/utils/mcp.py b/backend/app/utils/mcp.py index c969326fa..17387092c 100644 --- a/backend/app/utils/mcp.py +++ b/backend/app/utils/mcp.py @@ -14,6 +14,7 @@ raise ImportError("`mcp` not installed. 
Please install using `pip install mcp`") +from app.common.app_errors import InternalServiceError from app.utils import Audio, File, Image, Video @@ -64,14 +65,18 @@ async def call_tool(tool_name: str, **kwargs) -> ToolResult: return partial(call_tool, tool_name=tool.name) -class ToolExecutionError(Exception): +class ToolExecutionError(InternalServiceError): """Tool execution error.""" def __init__(self, message: str, error_type: str = "unknown", retryable: bool = False): - self.message = message - self.error_type = error_type # 'network', 'timeout', 'config', 'permission', 'unknown' - self.retryable = retryable - super().__init__(self.message) + self.error_type = error_type + super().__init__( + code="TOOL_EXECUTION_FAILED", + message=message, + source="tool", + retryable=retryable, + data={"error_type": error_type} if error_type != "unknown" else None, + ) def _is_retryable_error(error: Exception) -> bool: diff --git a/backend/app/utils/mentions.py b/backend/app/utils/mentions.py new file mode 100644 index 000000000..eabfdb579 --- /dev/null +++ b/backend/app/utils/mentions.py @@ -0,0 +1,33 @@ +""" +Mention parsing for comments. + +Format: [@DisplayName](mention://agent/<uuid>) or [@DisplayName](mention://member/<uuid>) +""" + +from __future__ import annotations + +import re +import uuid +from dataclasses import dataclass + +_MENTION_RE = re.compile( + r"\[@([^\]]*)\]\(mention://(agent|member)/([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})\)" +) + + +@dataclass(frozen=True) +class Mention: + display_name: str + type: str # "agent" | "member" + id: uuid.UUID + + +def parse_mentions(content: str) -> list[Mention]: + return [ + Mention(display_name=m.group(1), type=m.group(2), id=uuid.UUID(m.group(3))) + for m in _MENTION_RE.finditer(content) + ] + + +def agent_mentions(content: str) -> list[Mention]: + return [m for m in parse_mentions(content) if m.type == "agent"] diff --git a/backend/app/utils/safe_task.py b/backend/app/utils/safe_task.py new file mode 100644 index 000000000..95e3cd97d --- /dev/null +++ b/backend/app/utils/safe_task.py @@ -0,0 +1,30 @@ +""" +safe_create_task — drop-in replacement for asyncio.create_task with error logging. + +Usage: + from app.utils.safe_task import safe_create_task + + safe_create_task(some_coroutine(), name="my-task") +""" + +import asyncio + +from loguru import logger + + +def safe_create_task(coro, *, name: str | None = None) -> asyncio.Task: + """Create an asyncio task with automatic exception logging via done-callback.""" + task = asyncio.create_task(coro, name=name) + task.add_done_callback(_log_task_exception) + return task + + +def _log_task_exception(task: asyncio.Task) -> None: + if task.cancelled(): + return + exc = task.exception() + if exc: + # loguru has no stdlib-style exc_info kwarg; opt(exception=...) attaches the traceback + logger.opt(exception=exc).error(f"Background task '{task.get_name()}' failed: {exc}") diff --git a/backend/app/utils/stream_event_handler.py b/backend/app/utils/stream_event_handler.py deleted file mode 100644 index 225fa6ccc..000000000 --- a/backend/app/utils/stream_event_handler.py +++ /dev/null @@ -1,1043 +0,0 @@ -""" -Stream Event Handler (Production) - -Process LangGraph event streams and convert them to standardized SSE format. -Use map-based hierarchy tracking (modeled after Langfuse CallbackHandler architecture), -establishing N-level observation hierarchy via run_id + parent_run_id.
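The mention grammar added in mentions.py above is strict: a bracketed display name followed by a mention:// URI ending in a canonical UUID. A quick usage sketch of the parsing helpers:

import uuid

from app.utils.mentions import agent_mentions, parse_mentions

agent_id = uuid.uuid4()
text = f"Please review, [@Ada](mention://agent/{agent_id}) and [@Bob](mention://member/{uuid.uuid4()})"

mentions = parse_mentions(text)
assert [m.type for m in mentions] == ["agent", "member"]
assert agent_mentions(text)[0].id == agent_id  # agent_mentions filters to type == "agent"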
- -Core design: -- StreamState: map-based observation management (replaces stack) -- ObservationRecord: enhanced in-memory observation record -- StreamEventHandler: event -> SSE conversion; all handlers receive run_id/parent_run_id -- format_sse: safe serialization with graceful degradation -""" - -import json -import time -import uuid -from dataclasses import dataclass -from enum import Enum -from typing import Any, Optional - -from langchain_core.messages.base import BaseMessage -from loguru import logger - -from app.utils.message_serializer import serialize_messages, truncate_data -from app.utils.token_usage import extract_usage_from_output - -# ============ LangGraph control-flow exceptions (not marked as ERROR) ============ - -CONTROL_FLOW_EXCEPTIONS: set[type] = set() -try: - from langgraph.errors import GraphBubbleUp - - CONTROL_FLOW_EXCEPTIONS.add(GraphBubbleUp) -except ImportError: - pass - - -# ============ Observation Enums ============ - - -class ObsType(str, Enum): - SPAN = "SPAN" - GENERATION = "GENERATION" - TOOL = "TOOL" - EVENT = "EVENT" - - -class ObsLevel(str, Enum): - DEBUG = "DEBUG" - DEFAULT = "DEFAULT" - WARNING = "WARNING" - ERROR = "ERROR" - - -class ObsStatus(str, Enum): - RUNNING = "RUNNING" - COMPLETED = "COMPLETED" - FAILED = "FAILED" - INTERRUPTED = "INTERRUPTED" - - -# ============ ObservationRecord ============ - - -@dataclass -class ObservationRecord: - """ - In-memory observation record. - Batch-written to the database after the SSE stream ends. - """ - - id: str - trace_id: str - parent_observation_id: Optional[str] - type: ObsType - name: Optional[str] - start_time: float # epoch ms - # Lifecycle - end_time: Optional[float] = None - duration_ms: Optional[int] = None - status: ObsStatus = ObsStatus.RUNNING - # I/O - input_data: Optional[Any] = None - output_data: Optional[Any] = None - # Model info (GENERATION only) - model_name: Optional[str] = None - model_provider: Optional[str] = None - model_parameters: Optional[dict] = None - # Token usage (GENERATION only) - prompt_tokens: Optional[int] = None - completion_tokens: Optional[int] = None - total_tokens: Optional[int] = None - # Level / status - level: ObsLevel = ObsLevel.DEFAULT - status_message: Optional[str] = None - # Timestamps - completion_start_time: Optional[float] = None # time-to-first-token (GENERATION) - # Meta - metadata: Optional[dict] = None - version: Optional[str] = None # code/model version - - -# ============ StreamState ============ - - -class StreamState: - """ - Streaming state tracker. - - Use map-based hierarchy tracking (modeled after Langfuse runs + _child_to_parent_run_id_map) - instead of stack-based approach, correctly supporting concurrent and out-of-order events. 
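The map-based design described in this (deleted) docstring is easiest to see in a standalone toy: each run records its parent at start time, so parent observations resolve correctly even when events arrive concurrently or out of order. This sketch is editorial, not code from the module:

```python
# Toy version of map-based observation tracking (editorial sketch only).
import uuid

active: dict[str, str] = {}          # run_id -> observation_id
parents: dict[str, str | None] = {}  # run_id -> parent_run_id

def start(run_id: str, parent_run_id: str | None) -> str:
    parents[run_id] = parent_run_id
    obs_id = str(uuid.uuid4())
    active[run_id] = obs_id
    return obs_id

def parent_observation(run_id: str) -> str | None:
    parent_run = parents.get(run_id)
    return active.get(parent_run) if parent_run else None

root = start("run-1", None)
start("run-2", "run-1")
assert parent_observation("run-2") == root  # resolved via maps, no stack needed
```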
- """ - - def __init__(self, thread_id: str): - self.thread_id = thread_id - self.all_messages: list[BaseMessage] = [] - self.assistant_content = "" - self.stopped = False - self.has_error = False - - # agent run artifacts directory (artifacts API) - self.artifact_run_id: str = str(uuid.uuid4()) - - # interrupt state - self.interrupted = False - self.interrupt_node: str | None = None - self.interrupt_state: dict | None = None - - # ============ Trace / Observation tracking ============ - from app.core.trace_context import get_trace_id - - self.trace_id: str = get_trace_id() or str(uuid.uuid4()) - self.trace_start_time: float = time.time() * 1000 # epoch ms - - # core mappings (modeled after Langfuse CallbackHandler) - # run_id -> ObservationRecord (active observations) - self._active: dict[str, ObservationRecord] = {} - # run_id -> parent_run_id (hierarchy) - self._parent_map: dict[str, Optional[str]] = {} - # all completed observations (for persistence) - self._completed: list[ObservationRecord] = [] - # run_id -> observation_id mapping - self._run_to_obs: dict[str, str] = {} - # observation_id -> run_id (reverse mapping) - self._obs_to_run: dict[str, str] = {} - # first-token tracking - self._completion_start_tracked: set[str] = set() - - def append_content(self, chunk: str): - """Append a content chunk.""" - self.assistant_content += chunk - - # ============ Observation lifecycle ============ - - def create_observation( - self, - *, - run_id: str, - parent_run_id: Optional[str] = None, - obs_type: ObsType, - name: Optional[str] = None, - input_data: Optional[Any] = None, - model_name: Optional[str] = None, - model_provider: Optional[str] = None, - model_parameters: Optional[dict] = None, - metadata: Optional[dict] = None, - ) -> str: - """ - Create an observation, establishing hierarchy via parent_run_id (not stack push). - - Modeled after Langfuse _attach_observation() + _child_to_parent_run_id_map. - - Returns: - observation_id - """ - obs_id = str(uuid.uuid4()) - - # establish hierarchy - self._parent_map[run_id] = parent_run_id - - # resolve parent_observation_id (similar to Langfuse _get_parent_observation) - parent_obs_id: Optional[str] = None - if parent_run_id and parent_run_id in self._run_to_obs: - parent_obs_id = self._run_to_obs[parent_run_id] - - record = ObservationRecord( - id=obs_id, - trace_id=self.trace_id, - parent_observation_id=parent_obs_id, - type=obs_type, - name=name, - start_time=time.time() * 1000, - input_data=input_data, - model_name=model_name, - model_provider=model_provider, - model_parameters=model_parameters, - metadata=metadata, - ) - - self._active[obs_id] = record - self._run_to_obs[run_id] = obs_id - self._obs_to_run[obs_id] = run_id - - return obs_id - - def end_observation( - self, - run_id: str, - *, - output_data: Optional[Any] = None, - level: Optional[ObsLevel] = None, - status_message: Optional[str] = None, - prompt_tokens: Optional[int] = None, - completion_tokens: Optional[int] = None, - total_tokens: Optional[int] = None, - status: ObsStatus = ObsStatus.COMPLETED, - ) -> Optional[str]: - """ - Complete an observation and move it to the completed list. - - Modeled after Langfuse _detach_observation(). 
- - Returns: - observation_id, or None if run_id was not found - """ - obs_id = self._run_to_obs.get(run_id) - if not obs_id: - logger.debug(f"end_observation: no observation for run_id={run_id[:8]}...") - return None - - record = self._active.pop(obs_id, None) - if not record: - logger.debug(f"end_observation: observation {obs_id[:8]} not active") - return obs_id # may have already been ended - - now = time.time() * 1000 - record.end_time = now - record.duration_ms = int(now - record.start_time) - record.status = status - - if output_data is not None: - record.output_data = output_data - if level is not None: - record.level = level - if status_message is not None: - record.status_message = status_message[:2000] # cap length - if prompt_tokens is not None: - record.prompt_tokens = prompt_tokens - if completion_tokens is not None: - record.completion_tokens = completion_tokens - if total_tokens is not None: - record.total_tokens = total_tokens - - self._completed.append(record) - - # clean up mappings - del self._run_to_obs[run_id] - self._obs_to_run.pop(obs_id, None) - - return obs_id - - def get_observation_id(self, run_id: str) -> Optional[str]: - """Return the observation_id for a given run_id.""" - return self._run_to_obs.get(run_id) - - def get_parent_observation_id(self, run_id: str) -> Optional[str]: - """Return the parent observation_id for a run_id (used in SSE envelope).""" - parent_run = self._parent_map.get(run_id) - if parent_run and parent_run in self._run_to_obs: - return self._run_to_obs[parent_run] - return None - - def track_completion_start(self, run_id: str) -> None: - """Record time-to-first-token for a GENERATION observation.""" - obs_id = self._run_to_obs.get(run_id) - if obs_id and obs_id not in self._completion_start_tracked: - record = self._active.get(obs_id) - if record and record.type == ObsType.GENERATION: - record.completion_start_time = time.time() * 1000 - self._completion_start_tracked.add(obs_id) - - def get_all_observations(self) -> list[ObservationRecord]: - """ - Return all observations (completed + incomplete). - Mark incomplete ones as INTERRUPTED. - """ - all_obs = list(self._completed) - for obs in self._active.values(): - obs.status = ObsStatus.INTERRUPTED - obs.end_time = time.time() * 1000 - obs.duration_ms = int(obs.end_time - obs.start_time) - all_obs.append(obs) - return all_obs - - -# ============ StreamEventHandler ============ - - -class StreamEventHandler: - """ - Production-grade streaming event handler. - - All handle_* methods uniformly accept run_id and parent_run_id, - using StreamState's map-based observation management. 
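For orientation, the handle_* methods below correspond one-to-one with LangGraph `astream_events` (v2) event keys. A rough sketch of that routing, assuming the standard LangChain event names; the project's real dispatcher (`_dispatch_stream_event`) lives outside this file and is not shown in this diff:

```python
# Rough routing sketch (assumed event names from LangChain astream_events v2).
EVENT_HANDLERS = {
    "on_chat_model_start": "handle_chat_model_start",
    "on_chat_model_stream": "handle_chat_model_stream",
    "on_chat_model_end": "handle_chat_model_end",
    "on_tool_start": "handle_tool_start",
    "on_tool_end": "handle_tool_end",
    "on_chain_start": "handle_node_start",
    "on_chain_end": "handle_node_end",
}

async def dispatch(handler, event: dict, state) -> list[str]:
    method = getattr(handler, EVENT_HANDLERS.get(event.get("event"), ""), None)
    if method is None:
        return []
    out = await method(event, state, event.get("run_id", ""), event.get("parent_run_id"))
    return out if isinstance(out, list) else ([out] if out else [])
```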
- """ - - @staticmethod - def _extract_metadata(event: dict) -> dict: - """Extract standardized metadata.""" - metadata = event.get("metadata", {}) - if not isinstance(metadata, dict): - metadata = {} - config = metadata.get("config", {}) - if not isinstance(config, dict): - config = {} - return { - "node_name": metadata.get("langgraph_node") or event.get("name") or "unknown", - "run_id": event.get("run_id", ""), - "tags": config.get("tags") or metadata.get("tags") or event.get("tags") or [], - "timestamp": int(time.time() * 1000), - } - - @staticmethod - def _extract_node_info(event: dict) -> dict: - """Extract node info (name, label, ID, etc.).""" - metadata = event.get("metadata", {}) - if not isinstance(metadata, dict): - metadata = {} - config = metadata.get("config", {}) - if not isinstance(config, dict): - config = {} - - node_name = metadata.get("langgraph_node") or event.get("name") or "unknown" - - tags = config.get("tags") or metadata.get("tags") or event.get("tags") or [] - if not isinstance(tags, list): - tags = [] - - first_tag = tags[0] if tags else None - first_tag_label = first_tag.get("label") if isinstance(first_tag, dict) else None - - node_label = ( - config.get("node_label") - or metadata.get("node_label") - or first_tag_label - or node_name.replace("_", " ").title() - ) - - return { - "node_name": node_name, - "node_label": node_label, - "node_id": config.get("node_id") or metadata.get("node_id"), - "node_type": config.get("node_type") or metadata.get("node_type"), - } - - @staticmethod - def _extract_model_parameters(event: dict) -> Optional[dict]: - """Extract model parameters (temperature, max_tokens, etc.) from a LangGraph event.""" - metadata = event.get("metadata", {}) - if not isinstance(metadata, dict): - return None - invocation_params = metadata.get("ls_model_kwargs") or {} - if not isinstance(invocation_params, dict): - return None - - params = {} - for key in [ - "temperature", - "max_tokens", - "max_completion_tokens", - "top_p", - "frequency_penalty", - "presence_penalty", - "stop", - "request_timeout", - ]: - if key in invocation_params: - params[key] = invocation_params[key] - - return params if params else None - - @staticmethod - def format_sse( - event_type: str, - payload: dict, - thread_id: str, - state: Optional["StreamState"] = None, - ) -> str: - """ - Build a standard SSE envelope. - - Include trace / observation hierarchy info. - Degrade to a simplified event on serialization failure. 
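Concretely, one frame on the wire wraps the envelope below in SSE `data:` framing; every value here is illustrative:

```python
# Illustrative SSE envelope (placeholder values), mirroring format_sse's fields.
import json

envelope = {
    "type": "content",
    "thread_id": "t-123",
    "run_id": "r-1",
    "node_name": "agent",
    "timestamp": 1700000000000,
    "tags": [],
    "trace_id": "tr-1",
    "observation_id": "obs-1",
    "parent_observation_id": "",
    "data": {"delta": "Hello"},
}
frame = f"data: {json.dumps(envelope, ensure_ascii=False)}\n\n"
```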
- """ - meta = payload.pop("_meta", {}) - - def _default(obj: Any) -> Any: - if isinstance(obj, BaseMessage): - return { - "type": obj.__class__.__name__, - "content": str(obj.content) if hasattr(obj, "content") else str(obj), - } - if isinstance(obj, Enum): - return obj.value - if hasattr(obj, "model_dump"): - try: - return obj.model_dump() - except Exception: - logger.debug("model_dump() failed in SSE serializer", exc_info=True) - if hasattr(obj, "dict"): - try: - return obj.dict() - except Exception: - logger.debug("dict() failed in SSE serializer", exc_info=True) - return str(obj) - - envelope = { - "type": event_type, - "thread_id": thread_id, - "run_id": meta.get("run_id", ""), - "node_name": meta.get("node_name", "system"), - "timestamp": meta.get("timestamp", int(time.time() * 1000)), - "tags": meta.get("tags", []), - "trace_id": meta.get("trace_id", state.trace_id if state else ""), - "observation_id": meta.get("observation_id", ""), - "parent_observation_id": meta.get("parent_observation_id", ""), - "data": payload, - } - - try: - return f"data: {json.dumps(envelope, ensure_ascii=False, default=_default)}\n\n" - except (TypeError, ValueError, OverflowError) as e: - logger.warning(f"SSE serialization failed for {event_type}: {e}") - fallback = { - "type": event_type, - "thread_id": thread_id, - "timestamp": int(time.time() * 1000), - "trace_id": state.trace_id if state else "", - "data": {"_serialization_error": str(e)[:200]}, - } - return f"data: {json.dumps(fallback)}\n\n" - - # ==================== Handler Methods ==================== - - # Max chars per message content in the model_input SSE frame. - # The system prompt + full conversation history in skill-creator turns can - # easily exceed 500 KB, causing the WS frame to be dropped by the browser. - _MODEL_INPUT_MSG_CONTENT_LIMIT = 2000 - - @staticmethod - def _truncate_messages_for_sse(messages: list[dict]) -> list[dict]: - """Truncate individual message content so the model_input SSE frame stays small. - - Keeps message structure (role, tool_calls, etc.) intact; only shortens - the 'content' field of each message to avoid oversized WS frames. - """ - limit = StreamEventHandler._MODEL_INPUT_MSG_CONTENT_LIMIT - result = [] - for msg in messages: - content = msg.get("content") - if isinstance(content, str) and len(content) > limit: - msg = {**msg, "content": content[:limit] + "… [truncated]"} - elif isinstance(content, list): - # Multimodal content blocks — truncate text parts - truncated_parts = [] - for part in content: - if isinstance(part, dict) and part.get("type") == "text": - text = part.get("text", "") - if len(text) > limit: - part = {**part, "text": text[:limit] + "… [truncated]"} - truncated_parts.append(part) - msg = {**msg, "content": truncated_parts} - result.append(msg) - return result - - async def handle_chat_model_start( - self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str] - ) -> str: - """Handle model start event. 
Create a GENERATION observation.""" - try: - event_data = event.get("data", {}) - input_data = event_data.get("input", {}) - raw_messages = input_data.get("messages", []) - - serialized_messages = serialize_messages(raw_messages) - - metadata = event.get("metadata", {}) - if not isinstance(metadata, dict): - metadata = {} - model_name = metadata.get("ls_model_name") or event.get("name", "unknown") - model_provider = metadata.get("ls_provider") or "unknown" - model_parameters = self._extract_model_parameters(event) - - obs_id = state.create_observation( - run_id=run_id, - parent_run_id=parent_run_id, - obs_type=ObsType.GENERATION, - name=model_name, - input_data=truncate_data({"messages": serialized_messages}), - model_name=model_name, - model_provider=model_provider, - model_parameters=model_parameters, - ) - - meta = self._extract_metadata(event) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - return self.format_sse( - "model_input", - { - "messages": self._truncate_messages_for_sse(serialized_messages), - "model_name": model_name, - "model_provider": model_provider, - "_meta": meta, - }, - state.thread_id, - state, - ) - except Exception as e: - logger.exception(f"handle_chat_model_start failed: {e}") - return self.format_sse( - "model_input", - { - "messages": [], - "model_name": "unknown", - "model_provider": "unknown", - "_meta": self._extract_metadata(event), - }, - state.thread_id, - state, - ) - - async def handle_chat_model_stream( - self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str] - ) -> Optional[str]: - """Handle text stream event. Record time-to-first-token.""" - try: - chunk = event.get("data", {}).get("chunk") - if not chunk or not hasattr(chunk, "content") or not chunk.content: - return None - - content = chunk.content - state.append_content(content) - - # record time-to-first-token - state.track_completion_start(run_id) - - obs_id = state.get_observation_id(run_id) or "" - - meta = self._extract_metadata(event) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - return self.format_sse("content", {"delta": content, "_meta": meta}, state.thread_id, state) - except Exception as e: - logger.exception(f"handle_chat_model_stream failed: {e}") - return None - - async def handle_chat_model_end( - self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str] - ) -> str: - """Handle model end event. 
Parse token usage precisely (multi-vendor compatible).""" - try: - event_data = event.get("data", {}) - output = event_data.get("output") - - metadata = event.get("metadata", {}) - if not isinstance(metadata, dict): - metadata = {} - model_name = metadata.get("ls_model_name") or event.get("name", "unknown") - model_provider = metadata.get("ls_provider") or "unknown" - - # multi-source token usage extraction - usage = extract_usage_from_output(output) - prompt_tokens = usage.get("input", 0) if usage else 0 - completion_tokens = usage.get("output", 0) if usage else 0 - total_tokens = usage.get("total", 0) if usage else 0 - - # raw usage_metadata for frontend display - usage_metadata = None - if output and hasattr(output, "usage_metadata") and output.usage_metadata: - um = output.usage_metadata - if hasattr(um, "__dict__"): - usage_metadata = {k: v for k, v in um.__dict__.items() if not k.startswith("_")} - elif isinstance(um, dict): - usage_metadata = um - - # complete GENERATION observation - output_summary = truncate_data(str(output), max_length=2000) if output else None - obs_id = state.end_observation( - run_id, - output_data={"output": output_summary} if output_summary else None, - prompt_tokens=prompt_tokens or None, - completion_tokens=completion_tokens or None, - total_tokens=total_tokens or None, - ) - - meta = self._extract_metadata(event) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id or "" - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - return self.format_sse( - "model_output", - { - "output": output, - "model_name": model_name, - "model_provider": model_provider, - "usage_metadata": usage_metadata, - "prompt_tokens": prompt_tokens, - "completion_tokens": completion_tokens, - "total_tokens": total_tokens, - "_meta": meta, - }, - state.thread_id, - state, - ) - except Exception as e: - logger.exception(f"handle_chat_model_end failed: {e}") - return self.format_sse( - "model_output", - { - "output": None, - "model_name": "unknown", - "model_provider": "unknown", - "_meta": self._extract_metadata(event), - }, - state.thread_id, - state, - ) - - async def handle_tool_start( - self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str] - ) -> str: - """Handle tool start event. Create a TOOL observation.""" - try: - tool_input = event.get("data", {}).get("input", {}) - if isinstance(tool_input, dict): - tool_input = {k: v for k, v in tool_input.items() if k != "runtime"} - - tool_name = event.get("name") - - obs_id = state.create_observation( - run_id=run_id, - parent_run_id=parent_run_id, - obs_type=ObsType.TOOL, - name=tool_name, - input_data=truncate_data({"tool_input": tool_input}), - ) - - meta = self._extract_metadata(event) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - return self.format_sse( - "tool_start", - {"tool_name": tool_name, "tool_input": tool_input, "_meta": meta}, - state.thread_id, - state, - ) - except Exception as e: - logger.exception(f"handle_tool_start failed: {e}") - return self.format_sse( - "tool_start", - {"tool_name": event.get("name"), "tool_input": {}, "_meta": self._extract_metadata(event)}, - state.thread_id, - state, - ) - - async def handle_tool_end(self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str]) -> str: - """Handle tool end event. 
Complete the TOOL observation.""" - try: - raw_output = event.get("data", {}).get("output") - output = raw_output.content if hasattr(raw_output, "content") else raw_output - tool_name = event.get("name") - - # detect errors - has_error = _detect_error(output) - - output_summary = truncate_data(str(output), max_length=2000) if output else None - obs_id = state.end_observation( - run_id, - output_data={"tool_output": output_summary} if output_summary else None, - level=ObsLevel.ERROR if has_error else ObsLevel.DEFAULT, - status_message=str(output)[:500] if has_error else None, - status=ObsStatus.FAILED if has_error else ObsStatus.COMPLETED, - ) - - # compute duration - record = None - for rec in state._completed: - if rec.id == obs_id: - record = rec - break - duration = record.duration_ms if record else None - - meta = self._extract_metadata(event) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id or "" - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - return self.format_sse( - "tool_end", - { - "tool_name": tool_name, - "tool_output": output, - "duration": duration, - "status": "error" if has_error else "success", - "_meta": meta, - }, - state.thread_id, - state, - ) - except Exception as e: - logger.exception(f"handle_tool_end failed: {e}") - return self.format_sse( - "tool_end", - { - "tool_name": event.get("name"), - "tool_output": None, - "status": "error", - "_meta": self._extract_metadata(event), - }, - state.thread_id, - state, - ) - - async def handle_node_start( - self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str] - ) -> str: - """Handle node start event. Create a SPAN observation.""" - try: - node_info = self._extract_node_info(event) - node_name = node_info["node_name"] - - obs_id = state.create_observation( - run_id=run_id, - parent_run_id=parent_run_id, - obs_type=ObsType.SPAN, - name=node_name, - metadata={"node_label": node_info.get("node_label"), "node_type": node_info.get("node_type")}, - ) - - meta = self._extract_metadata(event) - meta.update(node_info) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - return self.format_sse( - "node_start", - { - "node_name": node_name, - "node_label": node_info.get("node_label", node_name), - "node_id": node_info.get("node_id"), - "_meta": meta, - }, - state.thread_id, - state, - ) - except Exception as e: - logger.exception(f"handle_node_start failed: {e}") - return self.format_sse( - "node_start", - {"node_name": "unknown", "node_label": "Unknown", "_meta": self._extract_metadata(event)}, - state.thread_id, - state, - ) - - async def handle_node_end( - self, event: dict, state: StreamState, run_id: str, parent_run_id: Optional[str] - ) -> list[str]: - """Handle node end event. 
Return multiple SSE events.""" - try: - node_info = self._extract_node_info(event) - node_name = node_info["node_name"] - node_type = node_info.get("node_type", "unknown") - - output = event.get("data", {}).get("output") - has_error = _detect_error(output) - - # complete SPAN observation - output_summary = None - if output and isinstance(output, dict): - output_summary = truncate_data( - {k: str(v)[:500] for k, v in list(output.items())[:10]}, - max_length=5000, - ) - - obs_id = state.end_observation( - run_id, - output_data=output_summary, - level=ObsLevel.ERROR if has_error else ObsLevel.DEFAULT, - status_message=str(output)[:500] if has_error else None, - status=ObsStatus.FAILED if has_error else ObsStatus.COMPLETED, - ) - - # compute duration - record = None - for rec in state._completed: - if rec.id == obs_id: - record = rec - break - duration = record.duration_ms if record else None - - meta = self._extract_metadata(event) - meta.update(node_info) - meta["trace_id"] = state.trace_id - meta["observation_id"] = obs_id or "" - meta["parent_observation_id"] = state.get_parent_observation_id(run_id) or "" - - events: list[str] = [] - - # 0. CodeAgent events - if output and isinstance(output, dict): - code_agent_events = output.get("code_agent_events", []) - if code_agent_events: - events.extend(self._process_code_agent_events(code_agent_events, node_name, meta, state)) - - # get the current node's local output (if using Option B data flow) - local_payload = None - if output and isinstance(output, dict): - node_outputs = output.get("node_outputs", {}) - if node_id := node_info.get("node_id"): - local_payload = node_outputs.get(node_id) - elif node_name in node_outputs: # Fallback backwards compat - local_payload = node_outputs.get(node_name) - - # 1. node_end event - events.append( - self.format_sse( - "node_end", - { - "node_name": node_name, - "node_label": node_info.get("node_label", node_name), - "node_id": node_info.get("node_id"), - "duration": duration, - "status": "error" if has_error else "success", - "payload": local_payload, # Option B localized output - "_meta": meta, - }, - state.thread_id, - state, - ) - ) - - # 2. 
Command / state related events - if output and isinstance(output, dict): - events.extend(self._process_output_events(output, node_info, node_type, meta, state)) - - return events - except Exception as e: - logger.exception(f"handle_node_end failed: {e}") - meta = self._extract_metadata(event) - return [ - self.format_sse( - "node_end", - {"node_name": "unknown", "status": "error", "_meta": meta}, - state.thread_id, - state, - ) - ] - - # ==================== Private Helpers ==================== - - def _process_code_agent_events( - self, code_agent_events: list, node_name: str, meta: dict, state: StreamState - ) -> list[str]: - """Process a list of CodeAgent events.""" - events = [] - type_map = { - "thought": "code_agent_thought", - "code": "code_agent_code", - "observation": "code_agent_observation", - "final_answer": "code_agent_final_answer", - "planning": "code_agent_planning", - "error": "code_agent_error", - } - - for ca_event in code_agent_events: - ca_type = ca_event.get("type", "unknown") - ca_content = ca_event.get("content", "") - ca_step = ca_event.get("step", 0) - ca_metadata = ca_event.get("metadata", {}) - - sse_type = type_map.get(ca_type) - if not sse_type: - continue - - payload: dict[str, Any] = {"node_name": node_name, "step": ca_step, "_meta": meta} - - if ca_type == "thought": - payload["content"] = ca_content - elif ca_type == "code": - payload["code"] = ca_content - elif ca_type == "observation": - payload["observation"] = ca_content - payload["has_error"] = bool(ca_metadata.get("error")) - elif ca_type == "final_answer": - payload["answer"] = ca_content - elif ca_type == "planning": - payload["plan"] = ca_content - payload["is_update"] = ca_metadata.get("is_update", False) - elif ca_type == "error": - payload["error"] = ca_content - - events.append(self.format_sse(sse_type, payload, state.thread_id, state)) - - return events - - def _process_output_events( - self, output: dict, node_info: dict, node_type: str, meta: dict, state: StreamState - ) -> list[str]: - """Process Command / route / loop / parallel events from node output.""" - events = [] - node_name = node_info["node_name"] - route_decision = output.get("route_decision") - route_reason = output.get("route_reason") - - # routing decision - if node_type in ["condition", "router", "loop"] and route_decision: - events.append( - self.format_sse( - "route_decision", - { - "node_id": node_info.get("node_id") or node_name, - "node_type": node_type, - "result": route_decision, - "reason": route_reason or f"routing decision: {route_decision}", - "goto": "unknown", - }, - state.thread_id, - state, - ) - ) - - # Command events - cleaned_update = {} - for k, v in output.items(): - if k in ["route_decision", "route_reason"]: - continue - if k == "task_results" and isinstance(v, list): - cleaned_update[k] = _clean_task_results(v) - else: - cleaned_update[k] = v - - events.append( - self.format_sse( - "command", - {"update": cleaned_update, "goto": None, "reason": route_reason}, - state.thread_id, - state, - ) - ) - - # loop iteration - loop_count = output.get("loop_count") - if loop_count is not None: - events.append( - self.format_sse( - "loop_iteration", - { - "loop_node_id": node_info.get("node_id") or node_name, - "iteration": loop_count, - "max_iterations": output.get("max_loop_iterations", 0), - "condition_met": output.get("loop_condition_met", False), - "reason": output.get("route_reason") or f"iteration {loop_count}", - }, - state.thread_id, - state, - ) - ) - - # parallel tasks - task_states = 
output.get("task_states") - if task_states and isinstance(task_states, dict): - for task_id, task_state in task_states.items(): - if isinstance(task_state, dict): - status_map = {"running": "started", "completed": "completed", "error": "error"} - events.append( - self.format_sse( - "parallel_task", - { - "task_id": task_id, - "status": status_map.get(task_state.get("status", ""), "started"), - "result": task_state.get("result"), - "error_msg": task_state.get("error_msg"), - }, - state.thread_id, - state, - ) - ) - - # state update - updated_fields = [k for k in output.keys() if k not in ["route_decision", "route_reason"]] - if updated_fields: - events.append( - self.format_sse( - "state_update", - {"updated_fields": updated_fields, "state_snapshot": output}, - state.thread_id, - state, - ) - ) - - return events - - -# ============ Module-level Helpers ============ - - -def _detect_error(output: Any) -> bool: - """Detect whether the output contains error information.""" - if isinstance(output, dict): - return any(output.get(k) is not None for k in ("error", "exception", "Error")) - if isinstance(output, str): - lower = output.lower() - return any(kw in lower for kw in ("error", "exception", "failed", "failure")) - return False - - -def _clean_task_results(task_results: list) -> list: - """Remove circular references from task_results.""" - cleaned = [] - for tr in task_results: - if isinstance(tr, dict): - result = {"status": tr.get("status"), "task_id": tr.get("task_id")} - if "error_msg" in tr: - result["error_msg"] = tr.get("error_msg") - rv = tr.get("result") - if isinstance(rv, dict): - result["result"] = {k: v for k, v in rv.items() if k != "task_results"} - else: - result["result"] = rv - cleaned.append(result) - else: - cleaned.append(tr) - return cleaned diff --git a/backend/app/utils/string.py b/backend/app/utils/string.py index 31bac13bf..78328c8b3 100644 --- a/backend/app/utils/string.py +++ b/backend/app/utils/string.py @@ -1,9 +1,7 @@ import hashlib import json import re -import uuid from typing import Optional, Tuple, Type -from uuid import uuid4 from loguru import logger from pydantic import BaseModel, ValidationError @@ -199,19 +197,3 @@ def parse_response_model_str(content: str, output_schema: Type[BaseModel]) -> Op logger.warning("All parsing attempts failed.") return structured_output - - -def generate_id(seed: Optional[str] = None) -> str: - """ - Generate a deterministic UUID5 based on a seed string. - If no seed is provided, generate a random UUID4. - - Args: - seed (str): The seed string to generate the UUID from. - - Returns: - str: A deterministic UUID5 string. - """ - if seed is None: - return str(uuid4()) - return str(uuid.uuid5(uuid.NAMESPACE_DNS, seed)) diff --git a/backend/app/utils/token_usage.py b/backend/app/utils/token_usage.py index 0ae18c3aa..648c78565 100644 --- a/backend/app/utils/token_usage.py +++ b/backend/app/utils/token_usage.py @@ -140,9 +140,6 @@ def extract_usage_from_output(output: Any) -> Optional[dict[str, int]]: # 4. amazon-bedrock-invocationMetrics (Bedrock-Titan) if raw_usage is None: raw_usage = rm.get("amazon-bedrock-invocationMetrics") - # 5. 
usage_metadata (legacy fallback) - if raw_usage is None: - raw_usage = rm.get("usage_metadata") if raw_usage is None: return None diff --git a/backend/app/websocket/chat_commands.py b/backend/app/websocket/chat_commands.py deleted file mode 100644 index c0a1c6770..000000000 --- a/backend/app/websocket/chat_commands.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Command types and dispatch for chat WebSocket frames.""" - -from __future__ import annotations - -import uuid as uuid_lib -from dataclasses import dataclass, field -from typing import Any, Mapping - -from app.websocket.chat_protocol import ParsedChatExtension, ParsedChatStartFrame, ParsedCopilotExtension - - -@dataclass(frozen=True) -class StandardChatTurnCommand: - """Command representing a normal user chat message.""" - - request_id: str - message: str - thread_id: str | None - graph_id: uuid_lib.UUID | None - provider_name: str | None - model_name: str | None - metadata: dict[str, Any] - files: list[dict[str, Any]] - - -@dataclass(frozen=True) -class SkillCreatorTurnCommand(StandardChatTurnCommand): - """Command for a Skill Creator turn, extending the standard command.""" - - run_id: str | None - edit_skill_id: str | None - - -@dataclass(frozen=True) -class ChatRunTurnCommand(StandardChatTurnCommand): - """Command for a Chat run turn, extending the standard command.""" - - run_id: str | None = None - - -@dataclass(frozen=True) -class CopilotTurnCommand(StandardChatTurnCommand): - """Command for a Copilot turn, extending the standard command.""" - - run_id: str | None = None - graph_context: dict[str, Any] = field(default_factory=dict) - conversation_history: list[dict[str, Any]] = field(default_factory=list) - mode: str = "deepagents" - - -ChatTurnCommand = StandardChatTurnCommand | SkillCreatorTurnCommand | ChatRunTurnCommand | CopilotTurnCommand - - -def build_command_from_parsed_frame(frame: ParsedChatStartFrame) -> ChatTurnCommand: - """Convert a validated ParsedChatStartFrame into a ChatTurnCommand.""" - metadata, files = _sanitize_metadata_files(frame.metadata, frame.input.files) - provider_name = frame.input.provider_name - model_name = frame.input.model_name - - extension = frame.extension - if extension is None: - return StandardChatTurnCommand( - request_id=frame.request_id, - message=frame.input.message, - thread_id=frame.thread_id, - graph_id=frame.graph_id, - provider_name=provider_name, - model_name=model_name, - metadata=metadata, - files=files, - ) - - if isinstance(extension, ParsedCopilotExtension): - return CopilotTurnCommand( - request_id=frame.request_id, - message=frame.input.message, - thread_id=frame.thread_id, - graph_id=frame.graph_id, - provider_name=provider_name, - model_name=model_name, - metadata=metadata, - files=files, - run_id=extension.run_id, - graph_context=extension.graph_context, - conversation_history=extension.conversation_history, - mode=extension.mode, - ) - - if isinstance(extension, ParsedChatExtension): - return ChatRunTurnCommand( - request_id=frame.request_id, - message=frame.input.message, - thread_id=frame.thread_id, - graph_id=frame.graph_id, - provider_name=provider_name, - model_name=model_name, - metadata=metadata, - files=files, - run_id=extension.run_id, - ) - - # skill_creator path - if extension.edit_skill_id: - metadata["edit_skill_id"] = extension.edit_skill_id - - return SkillCreatorTurnCommand( - request_id=frame.request_id, - message=frame.input.message, - thread_id=frame.thread_id, - graph_id=frame.graph_id, - provider_name=provider_name, - model_name=model_name, - 
metadata=metadata, - files=files, - run_id=extension.run_id, - edit_skill_id=extension.edit_skill_id, - ) - - -def _normalize_files(files: list[Any]) -> list[dict[str, Any]]: - return [f for f in files if isinstance(f, dict)] - - -def _sanitize_metadata_files( - metadata: Mapping[str, Any], raw_files: Any -) -> tuple[dict[str, Any], list[dict[str, Any]]]: - sanitized = dict(metadata) - sanitized.pop("files", None) - - files = _normalize_files(raw_files if isinstance(raw_files, list) else []) - if files: - sanitized["files"] = files - - return sanitized, files diff --git a/backend/app/websocket/chat_protocol.py b/backend/app/websocket/chat_protocol.py deleted file mode 100644 index 3c5bfd036..000000000 --- a/backend/app/websocket/chat_protocol.py +++ /dev/null @@ -1,197 +0,0 @@ -"""Chat WebSocket protocol: frame parsing, validation, and message types.""" - -from __future__ import annotations - -import uuid as uuid_lib -from dataclasses import dataclass -from typing import Any, Literal - -RESERVED_METADATA_KEYS = {"mode", "run_id", "edit_skill_id", "extension", "kind", "files"} -ALLOWED_CLIENT_FRAME_TYPES = { - "ping", - "chat.start", - "chat.resume", - "chat.stop", -} - - -class ChatProtocolError(Exception): - """Raised when a client frame violates the chat protocol.""" - - def __init__(self, message: str, request_id: str | None = None) -> None: - super().__init__(message) - self.message = message - self.request_id = request_id - - -@dataclass(frozen=True) -class ParsedChatInput: - """Validated user input extracted from a chat.start frame.""" - - message: str - files: list[dict[str, Any]] - provider_name: str | None - model_name: str | None - - -@dataclass(frozen=True) -class ParsedSkillCreatorExtension: - """Extension payload for Skill Creator turns.""" - - kind: Literal["skill_creator"] - run_id: str | None - edit_skill_id: str | None - - -@dataclass(frozen=True) -class ParsedChatExtension: - """Extension payload for Chat run turns.""" - - kind: Literal["chat"] - run_id: str | None - - -@dataclass(frozen=True) -class ParsedCopilotExtension: - """Extension payload for Copilot turns.""" - - kind: Literal["copilot"] - run_id: str | None - graph_context: dict[str, Any] - conversation_history: list[dict[str, Any]] - mode: str - - -@dataclass(frozen=True) -class ParsedChatStartFrame: - """Fully validated chat.start frame ready for command construction.""" - - request_id: str - thread_id: str | None - graph_id: uuid_lib.UUID | None - input: ParsedChatInput - extension: ParsedSkillCreatorExtension | ParsedChatExtension | ParsedCopilotExtension | None - metadata: dict[str, Any] - - -def parse_client_frame(frame: dict[str, Any]) -> ParsedChatStartFrame | dict[str, Any]: - """Parse and validate a raw client JSON frame. - - Returns: - A ParsedChatStartFrame for chat.start frames, or the raw dict - for other recognized frame types (ping, resume, stop). - - Raises: - ChatProtocolError: If the frame type is unknown or invalid. 
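Since this module is removed in this diff, the snippet below only documents the old entry point's behavior; the request and run IDs are placeholders:

```python
# Sketch of the removed parse_client_frame contract (placeholder IDs).
from app.websocket.chat_protocol import ChatProtocolError, parse_client_frame

frame = {
    "type": "chat.start",
    "request_id": "req-1",
    "input": {"message": "hello", "provider_name": "openai", "model_name": "gpt-4o"},
    "extension": {"kind": "chat", "run_id": "run-1"},
}
parsed = parse_client_frame(frame)
assert parsed.request_id == "req-1" and parsed.extension.kind == "chat"

try:
    parse_client_frame({"type": "bogus"})
except ChatProtocolError as exc:
    print(exc.message)  # "unknown frame type: bogus"
```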
- """ - frame_type = str(frame.get("type") or "") - if frame_type not in ALLOWED_CLIENT_FRAME_TYPES: - raise ChatProtocolError(f"unknown frame type: {frame_type or ''}") - if frame_type == "chat.start": - return _parse_chat_start_frame(frame) - return frame - - -def _parse_chat_start_frame(frame: dict[str, Any]) -> ParsedChatStartFrame: - request_id = _coerce_request_id(frame.get("request_id")) - if not request_id: - raise ChatProtocolError("chat.start frame must include request_id") - - metadata_raw = frame.get("metadata") - metadata = dict(metadata_raw) if isinstance(metadata_raw, dict) else {} - reserved = RESERVED_METADATA_KEYS.intersection(metadata.keys()) - if reserved: - raise ChatProtocolError( - "reserved metadata keys are not allowed", - request_id=request_id, - ) - - input_payload = frame.get("input") - if not isinstance(input_payload, dict): - raise ChatProtocolError("chat.start frame must include an input object", request_id=request_id) - - message = str(input_payload.get("message") or "") - files_raw = input_payload.get("files") - files = [f for f in files_raw if isinstance(f, dict)] if isinstance(files_raw, list) else [] - provider_name_raw = input_payload.get("provider_name") - provider_name = str(provider_name_raw).strip() if provider_name_raw else None - model_name_raw = input_payload.get("model_name") - model_name = str(model_name_raw).strip() if model_name_raw else None - - extension = _parse_extension(frame.get("extension"), request_id) - - thread_id = _coerce_request_id(frame.get("thread_id")) - graph_id = _coerce_optional_uuid(frame.get("graph_id"), request_id=request_id, field_name="graph_id") - - return ParsedChatStartFrame( - request_id=request_id, - thread_id=thread_id, - graph_id=graph_id, - input=ParsedChatInput(message=message, files=files, provider_name=provider_name, model_name=model_name), - extension=extension, - metadata=metadata, - ) - - -def _parse_extension( - raw_extension: Any, request_id: str -) -> ParsedSkillCreatorExtension | ParsedChatExtension | ParsedCopilotExtension | None: - if raw_extension is None: - return None - if not isinstance(raw_extension, dict): - raise ChatProtocolError("extension must be an object", request_id=request_id) - - kind = raw_extension.get("kind") - run_id = _coerce_request_id(raw_extension.get("run_id")) - - if kind == "skill_creator": - edit_skill_id = _coerce_request_id(raw_extension.get("edit_skill_id")) - return ParsedSkillCreatorExtension(kind="skill_creator", run_id=run_id, edit_skill_id=edit_skill_id) - - if kind == "chat": - return ParsedChatExtension(kind="chat", run_id=run_id) - - if kind == "copilot": - graph_context = raw_extension.get("graph_context") - if not isinstance(graph_context, dict): - raise ChatProtocolError("copilot extension requires graph_context object", request_id=request_id) - conversation_history_raw = raw_extension.get("conversation_history") - conversation_history = ( - [item for item in conversation_history_raw if isinstance(item, dict)] - if isinstance(conversation_history_raw, list) - else [] - ) - mode = str(raw_extension.get("mode") or "deepagents") - return ParsedCopilotExtension( - kind="copilot", - run_id=run_id, - graph_context=graph_context, - conversation_history=conversation_history, - mode=mode, - ) - - raise ChatProtocolError( - f"unsupported extension kind: {kind or ''}", - request_id=request_id, - ) - - -def _coerce_request_id(value: Any) -> str | None: - if value is None: - return None - text = str(value).strip() - return text or None - - -def 
_coerce_optional_uuid(value: Any, *, request_id: str, field_name: str) -> uuid_lib.UUID | None: - text = _coerce_request_id(value) - if text is None: - return None - - try: - return uuid_lib.UUID(text) - except (ValueError, TypeError) as exc: - raise ChatProtocolError( - f"chat.start frame {field_name} must be a valid UUID", - request_id=request_id, - ) from exc diff --git a/backend/app/websocket/chat_task_supervisor.py b/backend/app/websocket/chat_task_supervisor.py deleted file mode 100644 index 8adaa5c90..000000000 --- a/backend/app/websocket/chat_task_supervisor.py +++ /dev/null @@ -1,202 +0,0 @@ -"""Supervisor for cancellable async tasks tied to a WebSocket connection.""" - -from __future__ import annotations - -import asyncio -import uuid as uuid_lib -from dataclasses import dataclass -from typing import Any, Awaitable, Callable, Coroutine, cast - -from loguru import logger - -from app.utils.task_manager import task_manager - -_UNSET = object() - - -@dataclass -class ChatTaskEntry: - """Tracks an in-flight chat turn and its associated asyncio task.""" - - thread_id: str | None - task: asyncio.Task[Any] - heartbeat_task: asyncio.Task[Any] | None = None - run_id: uuid_lib.UUID | None = None - persist_on_disconnect: bool = False - request_id: str = "" - - -class ChatTaskSupervisor: - """Manages the lifecycle of concurrent chat tasks for one connection. - - Provides request-id and thread-id based lookup, cancellation, - and graceful cleanup on disconnect. - """ - - def __init__( - self, - *, - stop_task: Callable[[str], Awaitable[None]] | None = None, - ) -> None: - """Initialize the supervisor. - - Args: - stop_task: Optional callback to stop a task by thread_id; - falls back to the global task_manager. - """ - self._tasks: dict[str, ChatTaskEntry] = {} - self._thread_to_request: dict[str, str] = {} - self._stop_task = stop_task - - @property - def tasks(self) -> dict[str, ChatTaskEntry]: - """Return the internal request_id-to-entry mapping.""" - return self._tasks - - def register(self, request_id: str, entry: ChatTaskEntry) -> None: - """Register an already-created task entry.""" - if not entry.request_id: - entry.request_id = request_id - self._tasks[request_id] = entry - self._bind_thread(request_id, entry.thread_id) - - def create_task( - self, - request_id: str, - runner: Coroutine[Any, Any, Any], - *, - name: str, - thread_id: str | None, - run_id: uuid_lib.UUID | None = None, - persist_on_disconnect: bool = False, - ) -> ChatTaskEntry: - """Create an asyncio task from a coroutine, register it, and return the entry.""" - task = asyncio.create_task(runner, name=name) - entry = ChatTaskEntry( - request_id=request_id, - thread_id=thread_id, - task=task, - run_id=run_id, - persist_on_disconnect=persist_on_disconnect, - ) - self.register(request_id, entry) - return entry - - def get(self, request_id: str) -> ChatTaskEntry | None: - """Look up a task entry by request_id.""" - return self._tasks.get(request_id) - - def get_by_thread(self, thread_id: str) -> ChatTaskEntry | None: - """Look up the most recent task entry for a thread.""" - request_id = self._thread_to_request.get(thread_id) - if request_id is not None: - return self._tasks.get(request_id) - return None - - def update( - self, - request_id: str, - *, - thread_id: str | None | object = _UNSET, - task: asyncio.Task[Any] | object = _UNSET, - heartbeat_task: asyncio.Task[Any] | None | object = _UNSET, - run_id: uuid_lib.UUID | None | object = _UNSET, - persist_on_disconnect: bool | object = _UNSET, - ) -> ChatTaskEntry | None: 
- """Patch fields on an existing entry, skipping any that are _UNSET.""" - entry = self._tasks.get(request_id) - if entry is None: - return None - - if thread_id is not _UNSET: - thread_id_value = cast(str | None, thread_id) - entry.thread_id = thread_id_value - self._bind_thread(request_id, thread_id_value) - if task is not _UNSET: - entry.task = cast(asyncio.Task[Any], task) - if heartbeat_task is not _UNSET: - entry.heartbeat_task = cast(asyncio.Task[Any] | None, heartbeat_task) - if run_id is not _UNSET: - entry.run_id = cast(uuid_lib.UUID | None, run_id) - if persist_on_disconnect is not _UNSET: - entry.persist_on_disconnect = cast(bool, persist_on_disconnect) - - return entry - - def has_request(self, request_id: str) -> bool: - """Return True if a task is tracked under the given request_id.""" - return request_id in self._tasks - - def is_thread_active(self, thread_id: str) -> bool: - """Return True if any tracked task is running on the given thread.""" - request_id = self._thread_to_request.get(thread_id) - if request_id is not None: - entry = self._tasks.get(request_id) - if entry is not None and entry.thread_id == thread_id: - return True - self._thread_to_request.pop(thread_id, None) - - for mapped_request_id, entry in self._tasks.items(): - if entry.thread_id == thread_id: - self._thread_to_request[thread_id] = mapped_request_id - return True - return False - - async def stop_by_request_id(self, request_id: str) -> None: - """Signal a stop and cancel the asyncio task for the given request.""" - entry = self._tasks.get(request_id) - if entry is None: - return - - if entry.thread_id: - try: - await self._stop_thread(entry.thread_id) - except Exception: - logger.debug("chat task supervisor cleanup error", exc_info=True) - - entry.task.cancel() - if entry.heartbeat_task is not None: - entry.heartbeat_task.cancel() - - async def finalize(self, request_id: str) -> ChatTaskEntry | None: - """Remove a task entry and cancel its heartbeat, returning the entry.""" - entry = self._tasks.pop(request_id, None) - if entry and entry.thread_id: - self._thread_to_request.pop(entry.thread_id, None) - if entry and entry.heartbeat_task is not None: - entry.heartbeat_task.cancel() - try: - await entry.heartbeat_task - except asyncio.CancelledError: - logger.debug("task cancelled during cleanup") - return entry - - async def cancel_all(self) -> None: - """Cancel all non-persistent tasks and await their completion.""" - cancellable = [ - (request_id, entry) for request_id, entry in list(self._tasks.items()) if not entry.persist_on_disconnect - ] - - for request_id, _ in cancellable: - await self.stop_by_request_id(request_id) - - for request_id, entry in cancellable: - try: - await entry.task - except BaseException: - logger.debug("suppressed exception during final cleanup", exc_info=True) - if request_id in self._tasks: - await self.finalize(request_id) - - def _bind_thread(self, request_id: str, thread_id: str | None) -> None: - for existing_thread_id, existing_request_id in list(self._thread_to_request.items()): - if existing_request_id == request_id: - self._thread_to_request.pop(existing_thread_id, None) - if thread_id: - self._thread_to_request[thread_id] = request_id - - async def _stop_thread(self, thread_id: str) -> None: - if self._stop_task is not None: - await self._stop_task(thread_id) - return - await task_manager.stop_task(thread_id) diff --git a/backend/app/websocket/chat_turn_executor.py b/backend/app/websocket/chat_turn_executor.py deleted file mode 100644 index 64392cd1b..000000000 
--- a/backend/app/websocket/chat_turn_executor.py +++ /dev/null @@ -1,1006 +0,0 @@ -"""Executor for a single chat turn (standard or resume).""" - -from __future__ import annotations - -import asyncio -import importlib -import time -import uuid as uuid_lib -from dataclasses import dataclass -from typing import Any - -from loguru import logger as _logger - -from app.common.exceptions import ModelConfigError -from app.models.agent_run import AgentRunStatus -from app.schemas.chat import ChatRequest -from app.utils.stream_event_handler import StreamState -from app.websocket.chat_commands import ChatRunTurnCommand, ChatTurnCommand, CopilotTurnCommand, SkillCreatorTurnCommand -from app.websocket.chat_task_supervisor import ChatTaskEntry - - -async def _release_graph_sandbox(built_graph: Any) -> None: - """Release sandbox handle attached to a compiled graph, if any.""" - if built_graph is None: - return - handle = getattr(built_graph, "_sandbox_handle", None) - if handle is not None: - try: - await handle.release() - except Exception as exc: - _logger.warning(f"Failed to release sandbox handle: {exc}") - - -@dataclass(frozen=True) -class PreparedStandardTurn: - """Immutable bundle of validated parameters ready for turn execution.""" - - request_id: str - payload: ChatRequest - run_id: uuid_lib.UUID | None - persist_on_disconnect: bool - - -class ChatTurnExecutor: - """Runs a single chat turn against the LangGraph agent. - - Handles both new-message (standard) turns and resume turns for - interrupted graph executions. - """ - - def __init__( - self, - *, - handler: Any, - dependencies: Any | None = None, - ) -> None: - """Initialize the executor. - - Args: - handler: The owning ChatWsHandler instance. - dependencies: Optional module providing helper functions; defaults - to the handler's own module. 
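The prepare step below reduces every command variant to one immutable bundle; its key rule is that a turn carrying a `run_id` survives client disconnects. A condensed, hypothetical sketch (the executor itself is deleted in this diff; `ChatRequest` remains in `app.schemas.chat`):

```python
# Condensed sketch of the removed prepare_standard_turn contract (placeholder values).
import uuid

from app.schemas.chat import ChatRequest

run_id = uuid.UUID("6f1c2b9e-8d1c-4f6a-9a3b-2c4d5e6f7a8b")  # hypothetical client run id
persist_on_disconnect = run_id is not None  # skill-creator/chat-run/copilot turns persist

payload = ChatRequest(
    message="hello",
    thread_id=None,   # a new conversation gets created downstream
    graph_id=None,    # None selects the default deep-agents graph
    provider_name=None,
    model_name=None,
    metadata={},
)
```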
- """ - self._handler = handler - self._module = dependencies or importlib.import_module(handler.__class__.__module__) - - def prepare_standard_turn(self, command: ChatTurnCommand) -> PreparedStandardTurn: - """Build a PreparedStandardTurn from a client command.""" - metadata = dict(command.metadata or {}) - if command.files: - metadata["files"] = command.files - - run_id: uuid_lib.UUID | None = None - persist_on_disconnect = False - if isinstance(command, SkillCreatorTurnCommand): - run_id = self._parse_uuid(command.run_id) - persist_on_disconnect = run_id is not None - if command.edit_skill_id and "edit_skill_id" not in metadata: - metadata["edit_skill_id"] = command.edit_skill_id - elif isinstance(command, ChatRunTurnCommand): - run_id = self._parse_uuid(command.run_id) - persist_on_disconnect = run_id is not None - elif isinstance(command, CopilotTurnCommand): - run_id = self._parse_uuid(command.run_id) - persist_on_disconnect = run_id is not None - - return PreparedStandardTurn( - request_id=str(command.request_id or ""), - payload=ChatRequest( - message=str(command.message or ""), - thread_id=str(command.thread_id) if command.thread_id else None, - graph_id=command.graph_id, - provider_name=command.provider_name, - model_name=command.model_name, - metadata=metadata, - ), - run_id=run_id, - persist_on_disconnect=persist_on_disconnect, - ) - - async def run_standard_turn(self, prepared: PreparedStandardTurn) -> None: - """Dispatch a prepared standard turn to the handler or fallback executor.""" - run_chat_turn = getattr(self._handler, "_run_chat_turn", None) - if not callable(run_chat_turn): - run_chat_turn = self.execute_standard_turn - await run_chat_turn(request_id=prepared.request_id, payload=prepared.payload) - - async def run_resume_turn(self, request_id: str, thread_id: str, command: dict[str, object]) -> None: - """Dispatch a resume turn to the handler or fallback executor.""" - run_resume_turn = getattr(self._handler, "_run_resume_turn", None) - if not callable(run_resume_turn): - run_resume_turn = self.execute_resume_turn - await run_resume_turn(request_id=request_id, thread_id=thread_id, command=command) - - async def execute_standard_turn(self, request_id: str, payload: ChatRequest) -> None: - """Stream a new user message through the graph and emit events to the client.""" - from app.core.trace_context import set_trace_id - - set_trace_id(request_id) - handler = self._handler - module = self._module - state: StreamState | None = None - thread_id: str | None = None - built_graph = None - graph_workspace_id: str | None = None - graph_display_name: str | None = None - artifact_collector = module.ArtifactCollector() - task_entry = handler._task_supervisor.get(request_id) - agent_run_id = task_entry.run_id if task_entry else None - tolerate_disconnect = bool(task_entry and task_entry.persist_on_disconnect) - assistant_message_id = f"msg-assistant-{uuid_lib.uuid4()}" - - try: - file_emitter = module.FileEventEmitter() - async with module.AsyncSessionLocal() as db: - thread_id, _ = await module.get_or_create_conversation( - payload.thread_id, - payload.message, - handler.user_id, - payload.metadata, - db, - ) - await module.save_user_message(thread_id, payload.message, payload.metadata, db) - config, base_context = await module.get_user_config(handler.user_id, thread_id) - - initial_context = base_context.copy() - if payload.graph_id: - from app.repositories.graph import GraphRepository - - graph_repo = GraphRepository(db) - graph_model = await graph_repo.get(payload.graph_id) - 
if graph_model: - ws_id = getattr(graph_model, "workspace_id", None) - graph_workspace_id = str(ws_id) if ws_id else None - graph_display_name = getattr(graph_model, "name", None) or getattr(graph_model, "title", None) - if graph_model and graph_model.variables: - context_vars = graph_model.variables.get("context", {}) - if context_vars: - for key, value in context_vars.items(): - if isinstance(value, dict) and "value" in value: - initial_context[key] = value["value"] - else: - initial_context[key] = value - - graph_service = module.GraphService(db) - if payload.graph_id is None: - built_graph = await graph_service.create_default_deep_agents_graph( - user_id=handler.user_id, - file_emitter=file_emitter, - ) - else: - from app.repositories.user import UserRepository - - user_repo = UserRepository(db) - current_user = await user_repo.get_by_id(handler.user_id) - built_graph = await graph_service.create_graph_by_graph_id( - graph_id=payload.graph_id, - user_id=handler.user_id, - current_user=current_user, - file_emitter=file_emitter, - thread_id=thread_id, - ) - - state = module.StreamState(thread_id) - current_task = asyncio.current_task() - if current_task is None: - raise RuntimeError("missing current asyncio task") - if handler._task_supervisor.get(request_id) is None: - handler._task_supervisor.register( - request_id, - ChatTaskEntry( - request_id=request_id, - thread_id=thread_id, - task=current_task, - run_id=agent_run_id, - persist_on_disconnect=tolerate_disconnect, - ), - ) - else: - handler._task_supervisor.update( - request_id, - thread_id=thread_id, - task=current_task, - run_id=agent_run_id, - persist_on_disconnect=tolerate_disconnect, - ) - await module.task_manager.register_task(thread_id, current_task) - - if agent_run_id is not None: - await handler._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.RUNNING, - runtime_owner_id=handler._runtime_owner_id, - ) - heartbeat_task = asyncio.create_task( - handler._run_persisted_run_heartbeat(agent_run_id), - name=f"run-heartbeat:{agent_run_id}", - ) - handler._task_supervisor.update(request_id, heartbeat_task=heartbeat_task) - await handler._append_run_event( - run_id=agent_run_id, - event_type="assistant_message_started", - payload={ - "message": { - "id": assistant_message_id, - "role": "assistant", - "content": "", - "timestamp": int(time.time() * 1000), - "tool_calls": [], - } - }, - ) - - await handler._send( - { - "type": "accepted", - "request_id": request_id, - "thread_id": thread_id, - "run_id": str(agent_run_id) if agent_run_id is not None else None, - "timestamp": int(time.time() * 1000), - "data": {"status": "accepted"}, - }, - tolerate_disconnect=tolerate_disconnect, - ) - - stream_handler = module.StreamEventHandler() - artifact_collector.ensure_run_dir(handler.user_id, thread_id, state.artifact_run_id) - - await handler._send_stream_event( - stream_handler.format_sse( - "status", - {"status": "connected", "_meta": {"node_name": "system"}}, - thread_id, - ), - request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - enriched_message = module._enrich_message( - payload.message, - payload.metadata, - is_new_thread=(payload.thread_id is None), - log=module.logger.bind(user_id=handler.user_id, thread_id=thread_id), - endpoint="Chat WS", - ) - - interrupted = False - async for event in built_graph.astream_events( - {"messages": [module.HumanMessage(content=enriched_message)], "context": initial_context}, - config=config, - 
version="v2", - ): - if await module.task_manager.is_stopped(thread_id): - state.stopped = True - break - - async for sse_str in module._dispatch_stream_event(event, stream_handler, state, file_emitter): - await handler._send_stream_event( - sse_str, - request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - try: - snap = await module.safe_get_state( - built_graph, - config, - max_retries=3, - initial_delay=0.1, - log=module.logger, - ) - if snap.tasks: - next_node = snap.tasks[0].target if snap.tasks else None - current_state = snap.values or {} - if payload.graph_id is None: - module.logger.warning( - f"Default agent interrupted, resume not supported | thread_id={thread_id}" - ) - else: - await handler._emit_event( - { - "type": "interrupt", - "thread_id": thread_id, - "node_name": next_node or "unknown", - "node_label": next_node.replace("_", " ").title() if next_node else "Unknown Node", - "data": { - "node_name": next_node or "unknown", - "node_label": next_node.replace("_", " ").title() if next_node else "Unknown Node", - "state": current_state, - "thread_id": thread_id, - }, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - async with module.AsyncSessionLocal() as session: - result_query = await session.execute( - module.select(module.Conversation).where(module.Conversation.thread_id == thread_id) - ) - if conv := result_query.scalar_one_or_none(): - if not conv.meta_data: - conv.meta_data = {} - conv.meta_data["interrupted_graph_id"] = str(payload.graph_id) - await session.commit() - state.interrupted = True - state.interrupt_node = next_node - state.interrupt_state = current_state - interrupted = True - except Exception as exc: - module.logger.warning(f"Failed to inspect interrupt state | thread_id={thread_id} | error={exc}") - - if state and not state.all_messages and not state.stopped and not interrupted: - try: - snap = await module.safe_get_state( - built_graph, - config, - max_retries=2, - initial_delay=0.05, - log=module.logger, - ) - if snap.values and "messages" in snap.values: - msgs = snap.values["messages"] - from langgraph.types import Overwrite - - state.all_messages = msgs.value if isinstance(msgs, Overwrite) else msgs - except Exception as exc: - module.logger.warning(f"Failed to fetch final state | thread_id={thread_id} | error={exc}") - - if state.interrupted: - return - - if state.stopped: - await handler._emit_event( - { - "type": "error", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {"message": "Stopped by user", "code": "stopped"}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - except asyncio.CancelledError: - if state is not None: - state.stopped = True - try: - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id or payload.thread_id or "", - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, 
- tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - except Exception: - _logger.debug("error sending stream event", exc_info=True) - raise - except Exception as exc: - if state is not None and not (module.GraphBubbleUp is not None and type(exc) is module.GraphBubbleUp): - state.has_error = True - error_data: dict[str, object] = {"message": str(exc)} - if isinstance(exc, ModelConfigError): - error_data["error_code"] = exc.error_code - error_data["params"] = exc.params - await handler._emit_event( - { - "type": "error", - "thread_id": thread_id or payload.thread_id or "", - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": error_data, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id or payload.thread_id or "", - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - finally: - await _release_graph_sandbox(built_graph) - - await handler._finalize_task( - request_id=request_id, - thread_id=thread_id, - state=state, - built_graph=built_graph, - artifact_collector=artifact_collector, - graph_id=str(payload.graph_id) if payload.graph_id else None, - workspace_id=graph_workspace_id, - graph_name=graph_display_name, - ) - - async def execute_resume_turn(self, request_id: str, thread_id: str, command: dict[str, object]) -> None: - """Resume an interrupted graph execution and stream the remaining events.""" - from app.core.trace_context import set_trace_id - - set_trace_id(request_id) - handler = self._handler - module = self._module - state: StreamState | None = None - built_graph = None - graph_workspace_id: str | None = None - graph_display_name: str | None = None - graph_id = None - config = None - stream_handler = None - ws_command = None - agent_run_id: uuid_lib.UUID | None = None - tolerate_disconnect = False - assistant_message_id = f"msg-assistant-{uuid_lib.uuid4()}" - - try: - async with module.AsyncSessionLocal() as db: - result = await db.execute( - module.select(module.Conversation).where( - module.Conversation.thread_id == thread_id, - module.Conversation.user_id == handler.user_id, - ) - ) - conversation = result.scalar_one_or_none() - if not conversation: - await handler._send( - {"type": "ws_error", "request_id": request_id, "message": "conversation not found"} - ) - return - - if ( - conversation.meta_data - and isinstance(conversation.meta_data, dict) - and "interrupted_graph_id" in conversation.meta_data - ): - try: - graph_id = uuid_lib.UUID(str(conversation.meta_data["interrupted_graph_id"])) - except (ValueError, TypeError): - graph_id = None - - if graph_id is None: - await handler._send({"type": "ws_error", "request_id": request_id, "message": "graph id not found"}) - return - - config, _ = await module.get_user_config(handler.user_id, thread_id) - - from langgraph.types import Command - - from app.repositories.graph import GraphRepository - from app.repositories.user import UserRepository - - graph_repo = GraphRepository(db) - graph_model = await graph_repo.get(graph_id) - if graph_model: - ws_id = getattr(graph_model, "workspace_id", None) - graph_workspace_id = str(ws_id) if ws_id else None - 
graph_display_name = getattr(graph_model, "name", None) or getattr(graph_model, "title", None) - - user_repo = UserRepository(db) - current_user = await user_repo.get_by_id(handler.user_id) - - graph_service = module.GraphService(db) - built_graph = await graph_service.create_graph_by_graph_id( - graph_id=graph_id, - user_id=handler.user_id, - current_user=current_user, - ) - - snap = await module.safe_get_state( - built_graph, - config, - max_retries=3, - initial_delay=0.1, - log=module.logger, - ) - if not snap.tasks: - await handler._send( - {"type": "ws_error", "request_id": request_id, "message": "no interrupt state found"} - ) - return - - state = module.StreamState(thread_id) - current_task = asyncio.current_task() - if current_task is None: - raise RuntimeError("missing current asyncio task") - if handler._task_supervisor.get(request_id) is None: - handler._task_supervisor.register( - request_id, - ChatTaskEntry(request_id=request_id, thread_id=thread_id, task=current_task), - ) - else: - handler._task_supervisor.update(request_id, thread_id=thread_id, task=current_task) - await module.task_manager.register_task(thread_id, current_task) - - task_entry = handler._task_supervisor.get(request_id) - agent_run_id = task_entry.run_id if task_entry else None - tolerate_disconnect = bool(task_entry and task_entry.persist_on_disconnect) - - if agent_run_id is not None: - await handler._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.RUNNING, - runtime_owner_id=handler._runtime_owner_id, - ) - heartbeat_task = asyncio.create_task( - handler._run_persisted_run_heartbeat(agent_run_id), - name=f"run-heartbeat:{agent_run_id}", - ) - handler._task_supervisor.update(request_id, heartbeat_task=heartbeat_task) - await handler._append_run_event( - run_id=agent_run_id, - event_type="assistant_message_started", - payload={ - "message": { - "id": assistant_message_id, - "role": "assistant", - "content": "", - "timestamp": int(time.time() * 1000), - "tool_calls": [], - } - }, - ) - - await handler._send( - { - "type": "accepted", - "request_id": request_id, - "thread_id": thread_id, - "run_id": str(agent_run_id) if agent_run_id is not None else None, - "timestamp": int(time.time() * 1000), - "data": {"status": "accepted"}, - }, - tolerate_disconnect=tolerate_disconnect, - ) - - stream_handler = module.StreamEventHandler() - ws_command = Command(update=command.get("update") or {}, goto=command.get("goto") or None) - - await handler._send_stream_event( - stream_handler.format_sse("status", {"status": "resumed", "_meta": {"node_name": "system"}}, thread_id), - request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - interrupted = False - async for event in built_graph.astream_events(ws_command, config=config, version="v2"): - if await module.task_manager.is_stopped(thread_id): - state.stopped = True - break - async for sse_str in module._dispatch_stream_event(event, stream_handler, state): - await handler._send_stream_event( - sse_str, - request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - try: - snap = await module.safe_get_state( - built_graph, - config, - max_retries=3, - initial_delay=0.1, - log=module.logger, - ) - if snap.tasks: - next_node = snap.tasks[0].target if snap.tasks else None - current_state = snap.values or {} - await handler._emit_event( - { - "type": "interrupt", - "thread_id": thread_id, - "node_name": 
next_node or "unknown", - "node_label": next_node.replace("_", " ").title() if next_node else "Unknown Node", - "data": { - "node_name": next_node or "unknown", - "node_label": next_node.replace("_", " ").title() if next_node else "Unknown Node", - "state": current_state, - "thread_id": thread_id, - }, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - state.interrupted = True - state.interrupt_node = next_node - state.interrupt_state = current_state - interrupted = True - except Exception as exc: - module.logger.warning(f"Failed to inspect resume interrupt state | thread_id={thread_id} | error={exc}") - - if not state.all_messages and not state.stopped and not interrupted: - try: - snap = await module.safe_get_state( - built_graph, - config, - max_retries=2, - initial_delay=0.05, - log=module.logger, - ) - if snap.values and "messages" in snap.values: - msgs = snap.values["messages"] - from langgraph.types import Overwrite - - state.all_messages = msgs.value if isinstance(msgs, Overwrite) else msgs - except Exception as exc: - module.logger.warning(f"Failed to fetch final resume state | thread_id={thread_id} | error={exc}") - - if state.interrupted: - return - - if state.stopped: - await handler._emit_event( - { - "type": "error", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {"message": "Stopped by user", "code": "stopped"}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - except asyncio.CancelledError: - if state is not None: - state.stopped = True - try: - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - except Exception: - _logger.debug("error during turn cleanup", exc_info=True) - raise - except Exception as exc: - if state is not None and not (module.GraphBubbleUp is not None and type(exc) is module.GraphBubbleUp): - state.has_error = True - await handler._emit_event( - { - "type": "error", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {"message": str(exc)}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id, - "node_name": "system", - "run_id": "", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - finally: - await _release_graph_sandbox(built_graph) - - await handler._finalize_task( - request_id=request_id, - thread_id=thread_id, - state=state, - built_graph=built_graph, - artifact_collector=None, - graph_id=str(graph_id) if 
graph_id else None, - workspace_id=graph_workspace_id, - graph_name=graph_display_name, - ) - - async def execute_copilot_turn( - self, - request_id: str, - payload: ChatRequest, - graph_context: dict[str, Any], - conversation_history: list[dict[str, Any]], - mode: str, - ) -> None: - """Execute a copilot turn: consume CopilotService stream and emit events.""" - from app.core.trace_context import set_trace_id - - set_trace_id(request_id) - handler = self._handler - module = self._module - state: StreamState | None = None - thread_id: str | None = None - task_entry = handler._task_supervisor.get(request_id) - agent_run_id = task_entry.run_id if task_entry else None - tolerate_disconnect = bool(task_entry and task_entry.persist_on_disconnect) - assistant_message_id = f"msg-assistant-{uuid_lib.uuid4()}" - - # Collection vars for graph persistence - final_actions: list[dict[str, Any]] = [] - - try: - async with module.AsyncSessionLocal() as db: - # Get or create thread for this copilot turn - thread_id, _ = await module.get_or_create_conversation( - payload.thread_id, - payload.message, - handler.user_id, - payload.metadata, - db, - ) - await module.save_user_message(thread_id, payload.message, payload.metadata, db) - - state = module.StreamState(thread_id) - current_task = asyncio.current_task() - if current_task is None: - raise RuntimeError("missing current asyncio task") - if handler._task_supervisor.get(request_id) is None: - handler._task_supervisor.register( - request_id, - ChatTaskEntry( - request_id=request_id, - thread_id=thread_id, - task=current_task, - run_id=agent_run_id, - persist_on_disconnect=tolerate_disconnect, - ), - ) - else: - handler._task_supervisor.update( - request_id, - thread_id=thread_id, - task=current_task, - run_id=agent_run_id, - persist_on_disconnect=tolerate_disconnect, - ) - await module.task_manager.register_task(thread_id, current_task) - - # Start heartbeat if persisted run - if agent_run_id is not None: - await handler._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.RUNNING, - runtime_owner_id=handler._runtime_owner_id, - ) - heartbeat_task = asyncio.create_task( - handler._run_persisted_run_heartbeat(agent_run_id), - name=f"run-heartbeat:{agent_run_id}", - ) - handler._task_supervisor.update(request_id, heartbeat_task=heartbeat_task) - - # Emit accepted - await handler._send( - { - "type": "accepted", - "request_id": request_id, - "thread_id": thread_id, - "run_id": str(agent_run_id) if agent_run_id is not None else None, - "timestamp": int(time.time() * 1000), - "data": {"status": "accepted"}, - }, - tolerate_disconnect=tolerate_disconnect, - ) - - # Create CopilotService and get stream - from app.services.copilot_service import CopilotService - - async with module.AsyncSessionLocal() as db: - # payload carries split model fields from frontend - service = CopilotService( - user_id=handler.user_id, - provider_name=payload.provider_name, - model_name=payload.model_name, - db=db, - ) - stream = service._get_copilot_stream( - prompt=payload.message, - graph_context=graph_context, - conversation_history=conversation_history, - mode=mode, - graph_id=str(payload.graph_id) if payload.graph_id else None, - ) - - async for event in stream: - if await module.task_manager.is_stopped(thread_id): - state.stopped = True - break - - event_type = event.get("type", "") - - # Collect for persistence - if event_type == "result": - final_actions = event.get("actions", []) - - # Emit to WS + run events - await handler._emit_event( - { - "type": event_type, 
- "thread_id": thread_id, - "node_name": "copilot", - "timestamp": int(time.time() * 1000), - "data": event, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - # Persist graph changes - # _persist_graph_from_actions creates its own DB session internally, - # so we only need user_id set on the service. - if payload.graph_id and final_actions: - persist_service = CopilotService(user_id=handler.user_id) - await persist_service._persist_graph_from_actions( - graph_id=str(payload.graph_id), - final_actions=final_actions, - ) - - # Emit done - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id, - "node_name": "copilot", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - except asyncio.CancelledError: - if state is not None: - state.stopped = True - try: - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id or "", - "node_name": "copilot", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - except Exception: - _logger.debug("error during copilot turn cleanup", exc_info=True) - raise - except Exception as exc: - if state is not None: - state.has_error = True - error_data: dict[str, object] = {"message": str(exc)} - if isinstance(exc, ModelConfigError): - error_data["error_code"] = exc.error_code - error_data["params"] = exc.params - await handler._emit_event( - { - "type": "error", - "thread_id": thread_id or "", - "node_name": "copilot", - "timestamp": int(time.time() * 1000), - "data": error_data, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - await handler._emit_event( - { - "type": "done", - "thread_id": thread_id or "", - "node_name": "copilot", - "timestamp": int(time.time() * 1000), - "data": {}, - }, - request_id=request_id, - tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - finally: - await handler._finalize_task( - request_id=request_id, - thread_id=thread_id, - state=state, - built_graph=None, - artifact_collector=None, - graph_id=str(payload.graph_id) if payload.graph_id else None, - workspace_id=None, - graph_name=None, - ) - - @staticmethod - def _parse_uuid(value: object) -> uuid_lib.UUID | None: - """Parse a value into a UUID, returning None on failure.""" - if not value: - return None - try: - return uuid_lib.UUID(str(value)) - except (ValueError, TypeError): - return None diff --git a/backend/app/websocket/chat_ws_handler.py b/backend/app/websocket/chat_ws_handler.py deleted file mode 100644 index 29a9deb1e..000000000 --- a/backend/app/websocket/chat_ws_handler.py +++ /dev/null @@ -1,630 +0,0 @@ -"""Persistent WebSocket chat handler for Chat page streaming.""" - -import asyncio -import json -import sys -import time -import uuid as uuid_lib -from typing import Any, cast - -from fastapi import WebSocket, WebSocketDisconnect -from langchain.messages import HumanMessage as HumanMessage -from loguru import logger -from sqlalchemy import select as select - -# Re-exported names below are part of ChatTurnExecutor's module dependency contract. 
-from app.api.v1.chat import ( - GraphBubbleUp as GraphBubbleUp, -) -from app.api.v1.chat import ( - _clear_interrupt_marker, - save_run_result, -) -from app.api.v1.chat import ( - _dispatch_stream_event as _dispatch_stream_event, -) -from app.api.v1.chat import ( - _enrich_message as _enrich_message, -) -from app.api.v1.chat import ( - get_or_create_conversation as get_or_create_conversation, -) -from app.api.v1.chat import ( - get_user_config as get_user_config, -) -from app.api.v1.chat import ( - safe_get_state as safe_get_state, -) -from app.api.v1.chat import ( - save_user_message as save_user_message, -) -from app.core.agent.artifacts import ArtifactCollector -from app.core.database import AsyncSessionLocal as AsyncSessionLocal -from app.core.database import async_session_factory -from app.core.settings import settings -from app.models import Conversation as Conversation -from app.models.agent_run import AgentRunStatus -from app.schemas.chat import ChatRequest -from app.services.graph_service import GraphService as GraphService -from app.services.run_service import RunService -from app.utils.file_event_emitter import FileEventEmitter as FileEventEmitter -from app.utils.stream_event_handler import StreamEventHandler as StreamEventHandler -from app.utils.stream_event_handler import StreamState -from app.utils.task_manager import task_manager -from app.websocket.chat_commands import ( - ChatTurnCommand, - CopilotTurnCommand, - build_command_from_parsed_frame, -) -from app.websocket.chat_protocol import ChatProtocolError, ParsedChatStartFrame, parse_client_frame -from app.websocket.chat_task_supervisor import ChatTaskEntry as ChatTaskEntry -from app.websocket.chat_task_supervisor import ChatTaskSupervisor -from app.websocket.chat_turn_executor import ChatTurnExecutor - - -class ChatWsHandler: - """Handle a persistent `/ws/chat` connection for a single user.""" - - def __init__(self, user_id: str, websocket: WebSocket): - """Initialize the handler for a single authenticated user connection.""" - self.user_id = user_id - self.websocket = websocket - self._task_supervisor = ChatTaskSupervisor( - stop_task=self._stop_managed_task, - ) - self._tasks = self._task_supervisor.tasks - self._turn_executor = ChatTurnExecutor(handler=self, dependencies=cast(Any, sys.modules[__name__])) - self._send_lock = asyncio.Lock() - self._socket_connected = True - self._runtime_owner_id = settings.run_runtime_instance_id - - async def _stop_managed_task(self, thread_id: str) -> None: - """Delegate task cancellation to the global task manager.""" - await task_manager.stop_task(thread_id) - - async def run(self) -> None: - """Read frames in a loop until the client disconnects.""" - try: - while True: - raw = await self.websocket.receive_text() - await self._handle_frame(raw) - except WebSocketDisconnect: - self._socket_connected = False - logger.info(f"Chat WebSocket disconnected | user_id={self.user_id}") - finally: - await self._cancel_all_tasks() - - async def _handle_frame(self, raw: str) -> None: - """Parse and dispatch a single client frame.""" - try: - frame = json.loads(raw) - except json.JSONDecodeError: - await self._send({"type": "ws_error", "message": "invalid json frame"}) - return - - if not isinstance(frame, dict): - await self._send({"type": "ws_error", "message": "frame must be a JSON object"}) - return - - try: - parsed_frame = parse_client_frame(frame) - except ChatProtocolError as exc: - await self._send( - { - "type": "ws_error", - "message": exc.message, - "request_id": exc.request_id, - } 
- ) - return - - if isinstance(parsed_frame, ParsedChatStartFrame): - await self._handle_chat_start_frame(parsed_frame) - return - - frame_type = str(parsed_frame.get("type") or "") - if frame_type == "chat.resume": - await self._handle_resume(parsed_frame) - return - if frame_type == "chat.stop": - await self._handle_stop(parsed_frame) - return - if frame_type == "ping": - await self._send({"type": "pong"}) - return - - await self._send({"type": "ws_error", "message": f"unknown frame type: {frame_type}"}) - - async def _handle_chat_start_frame(self, frame: ParsedChatStartFrame) -> None: - """Convert a parsed chat.start frame into a turn command and launch it.""" - command = build_command_from_parsed_frame(frame) - await self._start_turn_from_command(command) - - async def _start_turn_from_command(self, command: ChatTurnCommand) -> None: - """Validate and schedule a new chat turn as a supervised async task.""" - prepared = self._turn_executor.prepare_standard_turn(command) - request_id = prepared.request_id - message = prepared.payload.message - thread_key = prepared.payload.thread_id - - if not request_id or not message.strip(): - await self._send({"type": "ws_error", "message": "request_id and message are required"}) - return - if self._task_supervisor.has_request(request_id): - await self._send({"type": "ws_error", "message": "duplicate request_id"}) - return - if thread_key and self._task_supervisor.is_thread_active(thread_key): - await self._send( - { - "type": "ws_error", - "request_id": request_id, - "message": "turn already in progress for thread_id", - } - ) - return - - from app.core.trace_context import set_trace_id - - set_trace_id(prepared.request_id) - - async def runner() -> None: - if isinstance(command, CopilotTurnCommand): - await self._turn_executor.execute_copilot_turn( - request_id=prepared.request_id, - payload=prepared.payload, - graph_context=command.graph_context, - conversation_history=command.conversation_history, - mode=command.mode, - ) - else: - await self._turn_executor.run_standard_turn(prepared) - - self._task_supervisor.create_task( - request_id, - runner(), - name=f"chat-ws:{request_id}", - thread_id=thread_key, - run_id=prepared.run_id, - persist_on_disconnect=prepared.persist_on_disconnect, - ) - - async def _handle_resume(self, frame: dict[str, Any]) -> None: - """Resume an interrupted graph turn for the given thread.""" - request_id = str(frame.get("request_id") or "") - thread_id = str(frame.get("thread_id") or "") - raw_command = frame.get("command") - command: dict[str, Any] = cast(dict[str, Any], raw_command) if isinstance(raw_command, dict) else {} - - if not request_id or not thread_id: - await self._send({"type": "ws_error", "message": "request_id and thread_id are required"}) - return - if self._task_supervisor.has_request(request_id): - await self._send({"type": "ws_error", "message": "duplicate request_id"}) - return - if self._task_supervisor.is_thread_active(thread_id): - await self._send( - { - "type": "ws_error", - "request_id": request_id, - "message": "turn already in progress for thread_id", - } - ) - return - - from app.core.trace_context import set_trace_id - - set_trace_id(request_id) - - async def runner() -> None: - await self._turn_executor.run_resume_turn(request_id=request_id, thread_id=thread_id, command=command) - - # Inherit run_id from the previous task entry for this thread (if persisted) - existing_entry = self._task_supervisor.get_by_thread(thread_id) - resume_run_id = existing_entry.run_id if existing_entry else None - 
resume_persist = existing_entry.persist_on_disconnect if existing_entry else False - - self._task_supervisor.create_task( - request_id, - runner(), - name=f"chat-ws-resume:{request_id}", - thread_id=thread_id, - run_id=resume_run_id, - persist_on_disconnect=resume_persist, - ) - - async def _handle_stop(self, frame: dict[str, Any]) -> None: - """Cancel the running turn identified by request_id.""" - request_id = str(frame.get("request_id") or "") - if not request_id: - return - - await self._task_supervisor.stop_by_request_id(request_id) - - @staticmethod - def _parse_uuid(value: Any) -> uuid_lib.UUID | None: - """Parse a value into a UUID, returning None on failure.""" - if not value: - return None - try: - return uuid_lib.UUID(str(value)) - except (ValueError, TypeError): - return None - - async def _append_run_event( - self, - *, - run_id: uuid_lib.UUID, - event_type: str, - payload: dict[str, Any], - trace_id: uuid_lib.UUID | None = None, - observation_id: uuid_lib.UUID | None = None, - parent_observation_id: uuid_lib.UUID | None = None, - ) -> None: - """Persist a stream event to the durable run event log.""" - async with async_session_factory() as db: - service = RunService(db) - await service.append_event( - run_id=run_id, - event_type=event_type, - payload=payload, - trace_id=trace_id, - observation_id=observation_id, - parent_observation_id=parent_observation_id, - ) - - async def _mark_run_status( - self, - *, - run_id: uuid_lib.UUID, - status: AgentRunStatus, - runtime_owner_id: str | None = None, - error_code: str | None = None, - error_message: str | None = None, - result_summary: dict[str, Any] | None = None, - ) -> None: - """Update the persisted status of a durable agent run.""" - async with async_session_factory() as db: - service = RunService(db) - await service.mark_status( - run_id=run_id, - user_id=self.user_id, - status=status, - runtime_owner_id=runtime_owner_id, - error_code=error_code, - error_message=error_message, - result_summary=result_summary, - ) - - async def _touch_run_heartbeat(self, *, run_id: uuid_lib.UUID) -> None: - """Send a single heartbeat for a durable run to indicate liveness.""" - async with async_session_factory() as db: - service = RunService(db) - await service.touch_run_heartbeat( - run_id=run_id, - runtime_owner_id=self._runtime_owner_id, - ) - - async def _run_persisted_run_heartbeat(self, run_id: uuid_lib.UUID) -> None: - """Periodically touch the heartbeat for a persisted run until cancelled.""" - while True: - try: - await asyncio.sleep(settings.run_heartbeat_interval_seconds) - await self._touch_run_heartbeat(run_id=run_id) - except asyncio.CancelledError: - raise - except Exception as exc: - logger.warning(f"Persisted run heartbeat failed, will retry | run_id={run_id} | error={exc}") - await asyncio.sleep(5) # brief backoff before retry - - async def _mirror_run_stream_event( - self, - *, - run_id: uuid_lib.UUID, - event: dict[str, Any], - assistant_message_id: str | None, - ) -> None: - """Translate a WS stream event and persist it to the durable run log.""" - event_type = str(event.get("type") or "") - raw_data = event.get("data") - data = raw_data if isinstance(raw_data, dict) else {} - timestamp = int(event.get("timestamp") or int(time.time() * 1000)) - observation_id = self._parse_uuid(event.get("observation_id")) - - payload: dict[str, Any] | None = None - if event_type == "status": - stage = data.get("stage") - if stage is not None: - payload = {"stage": stage, "message": data.get("message", "")} - else: - message = 
str(data.get("status") or "") - payload = {"message": message, "status": message} - elif event_type == "content" and assistant_message_id: - delta = data.get("delta") if "delta" in data else data.get("content") - if delta: - payload = {"message_id": assistant_message_id, "delta": str(delta)} - elif event_type in ("thought_step", "tool_call", "tool_result", "result"): - payload = data - elif event_type == "tool_start" and assistant_message_id: - tool_input = data.get("tool_input") - payload = { - "message_id": assistant_message_id, - "tool": { - "id": str(observation_id or uuid_lib.uuid4()), - "name": str(data.get("tool_name") or "tool"), - "args": tool_input if isinstance(tool_input, dict) else {}, - "status": "running", - "startTime": timestamp, - }, - } - elif event_type == "tool_end" and assistant_message_id: - payload = { - "message_id": assistant_message_id, - "tool_id": str(observation_id) if observation_id else None, - "tool_name": data.get("tool_name"), - "tool_output": data.get("tool_output"), - "end_time": timestamp, - } - elif event_type == "file_event": - payload = { - "action": data.get("action"), - "path": data.get("path"), - "size": data.get("size"), - "timestamp": data.get("timestamp"), - } - elif event_type == "interrupt": - payload = {"interrupt": data} - elif event_type == "error": - payload = {"message": data.get("message"), "code": data.get("code")} - elif event_type == "done": - payload = {} - - if payload is None: - return - - await self._append_run_event( - run_id=run_id, - event_type="content_delta" if event_type == "content" else event_type, - payload=payload, - trace_id=self._parse_uuid(event.get("trace_id")), - observation_id=observation_id, - parent_observation_id=self._parse_uuid(event.get("parent_observation_id")), - ) - - async def _emit_event( - self, - event: dict[str, Any], - *, - request_id: str | None = None, - tolerate_disconnect: bool = False, - agent_run_id: uuid_lib.UUID | None = None, - assistant_message_id: str | None = None, - ) -> None: - """Send an event to the client and optionally mirror it to durable storage.""" - outbound = dict(event) - if request_id is not None: - outbound["request_id"] = request_id - if agent_run_id is not None: - asyncio.create_task( - self._mirror_run_stream_event( - run_id=agent_run_id, - event=outbound, - assistant_message_id=assistant_message_id, - ), - name=f"mirror-event:{agent_run_id}", - ) - await self._send(outbound, tolerate_disconnect=tolerate_disconnect) - - async def _run_chat_turn(self, request_id: str, payload: ChatRequest) -> None: - """Execute a standard (new-message) chat turn.""" - await self._turn_executor.execute_standard_turn(request_id=request_id, payload=payload) - - async def _run_resume_turn(self, request_id: str, thread_id: str, command: dict[str, Any]) -> None: - """Execute a resume turn to continue an interrupted graph.""" - await self._turn_executor.execute_resume_turn(request_id=request_id, thread_id=thread_id, command=command) - - async def _finalize_task( - self, - *, - request_id: str, - thread_id: str | None, - state: StreamState | None, - built_graph: Any, - artifact_collector: ArtifactCollector | None, - graph_id: str | None, - workspace_id: str | None, - graph_name: str | None, - ) -> None: - """Clean up after a turn: save results, write artifacts, and update run status.""" - task_entry = await self._task_supervisor.finalize(request_id) - agent_run_id = task_entry.run_id if task_entry else None - - if thread_id: - try: - await task_manager.unregister_task(thread_id) - except 
Exception as exc: - logger.warning(f"Failed to unregister task | thread_id={thread_id} | error={exc}") - - if thread_id and state is not None: - try: - await save_run_result( - thread_id, - state, - logger.bind(user_id=self.user_id, thread_id=thread_id), - graph_id=graph_id, - workspace_id=workspace_id, - user_id=self.user_id, - graph_name=graph_name, - ) - except Exception as exc: - logger.warning(f"Failed to save run result | thread_id={thread_id} | error={exc}") - - if built_graph is not None and hasattr(built_graph, "_cleanup_backend"): - try: - await built_graph._cleanup_backend() - except Exception as exc: - logger.warning(f"Failed to cleanup backend | thread_id={thread_id} | error={exc}") - - if thread_id and artifact_collector is not None and state is not None: - try: - run_dir = artifact_collector.ensure_run_dir(self.user_id, thread_id, state.artifact_run_id) - if built_graph is not None and hasattr(built_graph, "_export_artifacts_to"): - try: - built_graph._export_artifacts_to(run_dir) - except Exception as exc: - logger.warning(f"Sandbox export failed | thread_id={thread_id} | error={exc}") - status = "completed" - if state.stopped: - status = "stopped" - elif state.has_error: - status = "failed" - elif state.interrupted: - status = "interrupted" - artifact_collector.write_manifest( - run_dir, - { - "run_id": state.artifact_run_id, - "thread_id": thread_id, - "user_id": self.user_id, - "agent_type": "langgraph", - "graph_id": graph_id, - "status": status, - }, - ) - except Exception as exc: - logger.warning(f"Failed to write artifact manifest | thread_id={thread_id} | error={exc}") - - if thread_id and state is not None and not state.interrupted: - await _clear_interrupt_marker(thread_id, logger.bind(user_id=self.user_id, thread_id=thread_id)) - - if agent_run_id is not None: - result_summary = { - "thread_id": thread_id, - "graph_id": graph_id, - "workspace_id": workspace_id, - "graph_name": graph_name, - "artifact_run_id": state.artifact_run_id if state is not None else None, - } - try: - if state is None: - await self._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.FAILED, - error_code="missing_state", - error_message="Run finalized without stream state", - result_summary=result_summary, - ) - elif state.interrupted: - await self._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.INTERRUPT_WAIT, - result_summary=result_summary, - ) - elif state.stopped: - await self._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.CANCELLED, - error_code="stopped", - error_message="Stopped by user", - result_summary=result_summary, - ) - elif state.has_error: - await self._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.FAILED, - error_code="stream_error", - error_message="Agent run failed", - result_summary=result_summary, - ) - else: - await self._mark_run_status( - run_id=agent_run_id, - status=AgentRunStatus.COMPLETED, - result_summary=result_summary, - ) - except Exception as exc: - logger.warning(f"Failed to update persisted run status | run_id={agent_run_id} | error={exc}") - - async def _send_stream_event( - self, - sse_str: str | None, - request_id: str, - *, - tolerate_disconnect: bool = False, - agent_run_id: uuid_lib.UUID | None = None, - assistant_message_id: str | None = None, - ) -> None: - """Parse an SSE-formatted string and emit it as a WebSocket event.""" - event = self._parse_stream_event(sse_str) - if not event: - return - await self._emit_event( - event, - request_id=request_id, - 
tolerate_disconnect=tolerate_disconnect, - agent_run_id=agent_run_id, - assistant_message_id=assistant_message_id, - ) - - def _parse_stream_event(self, sse_str: str | None) -> dict[str, Any] | None: - """Extract the JSON payload from an SSE data line.""" - if not sse_str: - return None - - payload_str = "" - for line in sse_str.splitlines(): - stripped = line.strip() - if stripped.startswith("data:"): - payload_str = stripped[len("data:") :].strip() - break - - if not payload_str: - return None - - try: - payload = json.loads(payload_str) - except json.JSONDecodeError: - logger.warning("Failed to decode SSE payload for WS bridge") - return None - - if not isinstance(payload, dict): - return None - - return cast(dict[str, Any], payload) - - async def _send(self, event: dict[str, Any], *, tolerate_disconnect: bool = False) -> bool: - """Serialize and send a JSON event over the WebSocket. - - Returns: - True if sent successfully, False if the socket is disconnected - and tolerate_disconnect is True. - - Raises: - WebSocketDisconnect: If the socket is disconnected and - tolerate_disconnect is False. - """ - if not self._socket_connected: - if tolerate_disconnect: - return False - raise WebSocketDisconnect() - try: - async with self._send_lock: - await self.websocket.send_text(json.dumps(event)) - return True - except WebSocketDisconnect: - self._socket_connected = False - if tolerate_disconnect: - return False - raise - except RuntimeError: - self._socket_connected = False - if tolerate_disconnect: - return False - raise WebSocketDisconnect() - - def _is_thread_active(self, thread_id: str) -> bool: - """Check whether a turn is currently running for the given thread.""" - return self._task_supervisor.is_thread_active(thread_id) - - async def _cancel_all_tasks(self) -> None: - """Cancel all non-persistent tasks on disconnect.""" - await self._task_supervisor.cancel_all() diff --git a/backend/app/websocket/execution_subscription_handler.py b/backend/app/websocket/execution_subscription_handler.py new file mode 100644 index 000000000..2c6edf806 --- /dev/null +++ b/backend/app/websocket/execution_subscription_handler.py @@ -0,0 +1,204 @@ +"""WebSocket handler for execution event subscriptions.""" + +from __future__ import annotations + +import json +import uuid + +from fastapi import WebSocket, WebSocketDisconnect + +from app.common.app_errors import AppError, InvalidRequestError, NotFoundError +from app.core.database import AsyncSessionLocal +from app.services.execution_service import ExecutionService +from app.websocket.execution_subscription_manager import execution_subscription_manager + + +def _ws_error_frame(error: AppError) -> dict[str, object]: + return { + "type": "ws_error", + "error": error.to_payload(), + } + + +class ExecutionSubscriptionHandler: + """Handles subscribe/unsubscribe frames for execution event streams.""" + + async def handle_connection(self, websocket: WebSocket, user_id: str) -> None: + await websocket.accept() + try: + while True: + raw = await websocket.receive_text() + await self._handle_frame(websocket, user_id, raw) + except WebSocketDisconnect: + pass + finally: + execution_subscription_manager.disconnect(websocket) + + async def _handle_frame(self, websocket: WebSocket, user_id: str, raw: str) -> None: + try: + frame = json.loads(raw) + except json.JSONDecodeError: + await websocket.send_text( + json.dumps( + _ws_error_frame( + InvalidRequestError( + "Invalid websocket frame", + code="WEBSOCKET_INVALID_JSON", + data={"detail": "The execution subscription frame is not
valid JSON."}, + ) + ) + ) + ) + return + + frame_type = frame.get("type") + if frame_type == "ping": + await websocket.send_text(json.dumps({"type": "pong"})) + return + + if frame_type == "unsubscribe": + execution_id = frame.get("execution_id") + if execution_id: + execution_subscription_manager.remove_subscription(websocket, str(execution_id)) + return + + if frame_type != "subscribe": + await websocket.send_text( + json.dumps( + _ws_error_frame( + InvalidRequestError( + "Unknown websocket frame type", + code="WEBSOCKET_UNKNOWN_FRAME_TYPE", + data={"frame_type": frame_type}, + ) + ) + ) + ) + return + + execution_id_raw = frame.get("execution_id") + if not execution_id_raw: + await websocket.send_text( + json.dumps( + _ws_error_frame( + InvalidRequestError( + "execution_id is required", + code="WEBSOCKET_EXECUTION_ID_REQUIRED", + data=None, + ) + ) + ) + ) + return + + try: + execution_id = uuid.UUID(str(execution_id_raw)) + except ValueError: + await websocket.send_text( + json.dumps( + _ws_error_frame( + InvalidRequestError( + "Invalid execution_id", + code="WEBSOCKET_INVALID_EXECUTION_ID", + data={"execution_id": execution_id_raw}, + ) + ) + ) + ) + return + + try: + after_seq = int(frame.get("after_seq") or 0) + except (ValueError, TypeError): + await websocket.send_text( + json.dumps( + _ws_error_frame( + InvalidRequestError( + "Invalid after_seq", + code="WEBSOCKET_INVALID_AFTER_SEQ", + data={"after_seq": frame.get("after_seq")}, + ) + ) + ) + ) + return + + async with AsyncSessionLocal() as db: + service = ExecutionService(db) + execution = await service.get_execution(execution_id, user_id) + if execution is None: + await websocket.send_text( + json.dumps( + _ws_error_frame( + NotFoundError( + "Execution not found", + code="EXECUTION_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + ) + ) + ) + return + + snapshot = await service.get_snapshot(execution_id, user_id) + if snapshot is None: + await websocket.send_text( + json.dumps( + _ws_error_frame( + NotFoundError( + "Execution snapshot not found", + code="EXECUTION_SNAPSHOT_NOT_FOUND", + data={"execution_id": str(execution_id)}, + ) + ) + ) + ) + return + + snapshot_last_seq = int(snapshot.last_seq or 0) + await websocket.send_text( + json.dumps( + { + "type": "snapshot", + "execution_id": str(execution_id), + "last_seq": snapshot_last_seq, + "status": snapshot.projection.get("status"), + "error": snapshot.projection.get("error"), + "events": [], + } + ) + ) + + await execution_subscription_manager.add_subscription(websocket, str(execution_id)) + + catchup_after_seq = max(after_seq, snapshot_last_seq) + events = await service.list_events_after(execution_id, user_id, after_seq=catchup_after_seq, limit=1000) + replay_last_seq = snapshot_last_seq + for event in events: + seq = int(event.sequence_no) + replay_last_seq = max(replay_last_seq, seq) + await websocket.send_text( + json.dumps( + { + "type": "event", + "execution_id": str(execution_id), + "seq": seq, + "event_type": event.event_type, + "payload": event.payload, + "created_at": event.created_at.isoformat() if event.created_at else None, + } + ) + ) + + await websocket.send_text( + json.dumps( + { + "type": "replay_done", + "execution_id": str(execution_id), + "last_seq": replay_last_seq, + } + ) + ) + + +execution_subscription_handler = ExecutionSubscriptionHandler() diff --git a/backend/app/websocket/execution_subscription_manager.py b/backend/app/websocket/execution_subscription_manager.py new file mode 100644 index 000000000..46e781044 --- /dev/null +++ b/backend/app/websocket/execution_subscription_manager.py @@ -0,0
+1,69 @@ +"""In-memory execution subscription manager.""" + +from __future__ import annotations + +import json +from collections import defaultdict +from typing import Any + +from fastapi import WebSocket + + +class ExecutionSubscriptionManager: + """Tracks which WebSocket connections are subscribed to which execution IDs.""" + + def __init__(self) -> None: + self._exec_connections: dict[str, set[WebSocket]] = defaultdict(set) + self._connection_execs: dict[WebSocket, set[str]] = defaultdict(set) + + async def add_subscription(self, websocket: WebSocket, execution_id: str) -> None: + self._exec_connections[execution_id].add(websocket) + self._connection_execs[websocket].add(execution_id) + + def remove_subscription(self, websocket: WebSocket, execution_id: str) -> None: + execs = self._connection_execs.get(websocket) + if execs: + execs.discard(execution_id) + if not execs: + self._connection_execs.pop(websocket, None) + + connections = self._exec_connections.get(execution_id) + if connections: + connections.discard(websocket) + if not connections: + self._exec_connections.pop(execution_id, None) + + def disconnect(self, websocket: WebSocket) -> None: + exec_ids = list(self._connection_execs.get(websocket, set())) + for exec_id in exec_ids: + self.remove_subscription(websocket, exec_id) + + def remove_execution(self, execution_id: str) -> None: + """Remove all subscriptions for a completed execution, freeing memory.""" + connections = list(self._exec_connections.get(execution_id, set())) + for websocket in connections: + execs = self._connection_execs.get(websocket) + if execs: + execs.discard(execution_id) + if not execs: + self._connection_execs.pop(websocket, None) + self._exec_connections.pop(execution_id, None) + + async def broadcast_event(self, execution_id: str, message: dict[str, Any]) -> int: + connections = list(self._exec_connections.get(execution_id, set())) + success_count = 0 + disconnected: list[WebSocket] = [] + for connection in connections: + try: + await connection.send_text(json.dumps(message, default=str)) + success_count += 1 + except Exception: + disconnected.append(connection) + + for connection in disconnected: + self.disconnect(connection) + + return success_count + + +execution_subscription_manager = ExecutionSubscriptionManager() diff --git a/backend/app/websocket/notification_manager.py b/backend/app/websocket/notification_manager.py index 108040d3e..9f2a2ff0d 100644 --- a/backend/app/websocket/notification_manager.py +++ b/backend/app/websocket/notification_manager.py @@ -16,6 +16,9 @@ class NotificationType(str, Enum): INVITATION_ACCEPTED = "invitation_accepted" INVITATION_REJECTED = "invitation_rejected" INVITATION_CANCELLED = "invitation_cancelled" + TASK_ACTIVITY_ADDED = "task_activity_added" + TASK_UPDATED = "task_updated" + EXECUTION_STATUS_CHANGED = "execution_status_changed" PING = "ping" PONG = "pong" CONNECTED = "connected" diff --git a/backend/app/websocket/run_subscription_handler.py b/backend/app/websocket/run_subscription_handler.py deleted file mode 100644 index 446ce4c68..000000000 --- a/backend/app/websocket/run_subscription_handler.py +++ /dev/null @@ -1,130 +0,0 @@ -"""WebSocket handler for durable run subscriptions.""" - -from __future__ import annotations - -import json -import uuid - -from fastapi import WebSocket, WebSocketDisconnect - -from app.core.database import AsyncSessionLocal -from app.services.run_service import RunService -from app.websocket.run_subscription_manager import run_subscription_manager - - -class 
RunSubscriptionHandler: - """Handles subscribe/unsubscribe frames for durable run event streams.""" - - async def handle_connection(self, websocket: WebSocket, user_id: str) -> None: - """Accept the WebSocket and process frames until disconnect.""" - await websocket.accept() - try: - while True: - raw = await websocket.receive_text() - await self._handle_frame(websocket, user_id, raw) - except WebSocketDisconnect: - pass - finally: - run_subscription_manager.disconnect(websocket) - - async def _handle_frame(self, websocket: WebSocket, user_id: str, raw: str) -> None: - """Parse a raw JSON frame and handle subscribe, unsubscribe, or ping.""" - try: - frame = json.loads(raw) - except json.JSONDecodeError: - await websocket.send_text(json.dumps({"type": "ws_error", "message": "invalid json frame"})) - return - - frame_type = frame.get("type") - if frame_type == "ping": - await websocket.send_text(json.dumps({"type": "pong"})) - return - - if frame_type == "unsubscribe": - run_id = frame.get("run_id") - if run_id: - run_subscription_manager.remove_subscription(websocket, str(run_id)) - return - - if frame_type != "subscribe": - await websocket.send_text(json.dumps({"type": "ws_error", "message": f"unknown frame type: {frame_type}"})) - return - - run_id_raw = frame.get("run_id") - if not run_id_raw: - await websocket.send_text(json.dumps({"type": "ws_error", "message": "run_id is required"})) - return - - try: - run_id = uuid.UUID(str(run_id_raw)) - except ValueError: - await websocket.send_text(json.dumps({"type": "ws_error", "message": "invalid run_id"})) - return - - try: - after_seq = int(frame.get("after_seq") or 0) - except (ValueError, TypeError): - await websocket.send_text(json.dumps({"type": "ws_error", "message": "invalid after_seq"})) - return - - async with AsyncSessionLocal() as db: - service = RunService(db) - run = await service.get_run(run_id, user_id) - if run is None: - await websocket.send_text(json.dumps({"type": "ws_error", "message": "run not found"})) - return - - snapshot = await service.get_snapshot(run_id, user_id) - if snapshot is None: - await websocket.send_text(json.dumps({"type": "ws_error", "message": "snapshot not found"})) - return - - snapshot_last_seq = int(snapshot.last_seq or 0) - await websocket.send_text( - json.dumps( - { - "type": "snapshot", - "run_id": str(run_id), - "last_seq": snapshot_last_seq, - "data": snapshot.projection, - } - ) - ) - - await run_subscription_manager.add_subscription(websocket, str(run_id)) - - catchup_after_seq = max(after_seq, snapshot_last_seq) - events = await service.list_events_after(run_id, user_id, after_seq=catchup_after_seq, limit=1000) - replay_last_seq = snapshot_last_seq - for event in events: - replay_last_seq = max(replay_last_seq, int(event.seq)) - await websocket.send_text( - json.dumps( - { - "type": "event", - "run_id": str(run_id), - "seq": event.seq, - "event_type": event.event_type, - "data": event.payload, - "trace_id": str(event.trace_id) if event.trace_id else None, - "observation_id": str(event.observation_id) if event.observation_id else None, - "parent_observation_id": ( - str(event.parent_observation_id) if event.parent_observation_id else None - ), - "created_at": event.created_at.isoformat() if event.created_at else None, - } - ) - ) - - await websocket.send_text( - json.dumps( - { - "type": "replay_done", - "run_id": str(run_id), - "last_seq": replay_last_seq, - } - ) - ) - - -run_subscription_handler = RunSubscriptionHandler() diff --git a/backend/app/websocket/run_subscription_manager.py 
b/backend/app/websocket/run_subscription_manager.py deleted file mode 100644 index c08484f42..000000000 --- a/backend/app/websocket/run_subscription_manager.py +++ /dev/null @@ -1,70 +0,0 @@ -"""In-memory run subscription manager.""" - -from __future__ import annotations - -import json -from collections import defaultdict -from typing import Any - -from fastapi import WebSocket - - -class RunSubscriptionManager: - """Tracks which WebSocket connections are subscribed to which run IDs. - - Maintains bidirectional mappings between connections and run IDs for - efficient fan-out and cleanup. - """ - - def __init__(self) -> None: - self._run_connections: dict[str, set[WebSocket]] = defaultdict(set) - self._connection_runs: dict[WebSocket, set[str]] = defaultdict(set) - - async def add_subscription(self, websocket: WebSocket, run_id: str) -> None: - """Subscribe a connection to events for the given run.""" - self._run_connections[run_id].add(websocket) - self._connection_runs[websocket].add(run_id) - - def remove_subscription(self, websocket: WebSocket, run_id: str) -> None: - """Unsubscribe a connection from a specific run.""" - runs = self._connection_runs.get(websocket) - if runs: - runs.discard(run_id) - if not runs: - self._connection_runs.pop(websocket, None) - - connections = self._run_connections.get(run_id) - if connections: - connections.discard(websocket) - if not connections: - self._run_connections.pop(run_id, None) - - def disconnect(self, websocket: WebSocket) -> None: - """Remove all subscriptions for a disconnected WebSocket.""" - run_ids = list(self._connection_runs.get(websocket, set())) - for run_id in run_ids: - self.remove_subscription(websocket, run_id) - - async def broadcast_event(self, run_id: str, message: dict[str, Any]) -> int: - """Send an event to all connections subscribed to a run. - - Returns: - The number of connections that received the message successfully. 
- """ - connections = list(self._run_connections.get(run_id, set())) - success_count = 0 - disconnected: list[WebSocket] = [] - for connection in connections: - try: - await connection.send_text(json.dumps(message, default=str)) - success_count += 1 - except Exception: - disconnected.append(connection) - - for connection in disconnected: - self.disconnect(connection) - - return success_count - - -run_subscription_manager = RunSubscriptionManager() diff --git a/backend/env.example b/backend/env.example index 1280743a6..99bfb6563 100644 --- a/backend/env.example +++ b/backend/env.example @@ -30,7 +30,7 @@ WORKERS=4 # 工作进程数(生产环境建议 4 或更多) SECRET_KEY=your-secret-key-change-in-production-CHANGE-THIS-IN-PRODUCTION # JWT 算法 -ALGORITHM=HS256 +JWT_ALGORITHM=HS256 # Token 过期时间 ACCESS_TOKEN_EXPIRE_MINUTES=4320 @@ -113,9 +113,6 @@ LANGFUSE_HOST=https://cloud.langfuse.com CREDENTIAL_ENCRYPTION_KEY= -# Rich CLI 显示配置 -RICH_CLI_ENABLED=false - # ----------------------------------------------------------------------------- # 本地开发端口配置(可选) # ----------------------------------------------------------------------------- diff --git a/backend/pyproject.toml b/backend/pyproject.toml index d947899be..36c6ed80f 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -37,6 +37,8 @@ dependencies = [ # Observability "langfuse>=3.11.1", "loguru>=0.7.3", + "opentelemetry-api>=1.25.0", + "opentelemetry-sdk>=1.25.0", # Auth & Security "python-jose[cryptography]>=3.3.0", "passlib[bcrypt]>=1.7.4", @@ -61,12 +63,6 @@ dependencies = [ "aiosmtplib>=3.0.0", # Caching "redis>=5.0.0", - # Web scraping & automation - "beautifulsoup4>=4.14.2", - "selenium>=4.38.0", - "mitmproxy>=11.1.3", - # Security analysis - "angr>=9.2.182", # System & Docker "psutil>=7.1.3", "docker>=7.1.0", @@ -106,6 +102,7 @@ dev = [ "pre-commit>=4.0.0", "types-PyYAML>=6.0.0", "types-redis>=4.0.0", + "pyright>=1.1.409", ] [tool.ruff] diff --git a/backend/tests/api/test_artifacts.py b/backend/tests/api/test_artifacts.py deleted file mode 100644 index 926c3c9ec..000000000 --- a/backend/tests/api/test_artifacts.py +++ /dev/null @@ -1,59 +0,0 @@ -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from fastapi import FastAPI -from fastapi.testclient import TestClient - -from app.api.v1.artifacts import router -from app.core.database import get_db -from app.models.auth import AuthUser as User - - -async def mock_get_current_user(): - user = MagicMock(spec=User) - user.id = "user-123" - return user - - -async def mock_get_db(): - yield AsyncMock() - - -@pytest.fixture -def client(): - test_app = FastAPI() - test_app.include_router(router) - - from app.common.dependencies import get_current_user - - test_app.dependency_overrides[get_current_user] = mock_get_current_user - test_app.dependency_overrides[get_db] = mock_get_db - - with TestClient(test_app) as c: - yield c - - -@patch("app.services.sandbox_manager._sandbox_pool") -@patch("app.services.sandbox_manager.SandboxManagerService") -def test_live_read_file_returns_raw_content_for_ui(mock_service_cls, mock_pool, client: TestClient) -> None: - record = MagicMock() - record.id = "sandbox-1" - - mock_service = mock_service_cls.return_value - mock_service.get_user_sandbox_record = AsyncMock(return_value=record) - - adapter = MagicMock() - adapter.is_started.return_value = True - adapter.read.return_value = " 1\talpha\n 2\tbeta" - adapter.raw_read.return_value = "alpha\nbeta" - - mock_pool.get = AsyncMock(return_value=adapter) - mock_pool.release = AsyncMock(return_value=None) - - 
response = client.get("/v1/artifacts/thread-1/live/skills/demo/SKILL.md") - - assert response.status_code == 200 - assert response.text == "alpha\nbeta" - adapter.raw_read.assert_called_once_with("skills/demo/SKILL.md") - adapter.read.assert_not_called() - mock_pool.release.assert_awaited_once_with("sandbox-1") diff --git a/backend/tests/api/test_sandboxes.py b/backend/tests/api/test_sandboxes.py deleted file mode 100644 index e53ffd5a3..000000000 --- a/backend/tests/api/test_sandboxes.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -Tests for Admin Sandbox API -""" - -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from fastapi import FastAPI -from fastapi.testclient import TestClient - -from app.api.v1.sandboxes import router -from app.core.database import get_db -from app.models.auth import AuthUser as User - -# Create a minimal app for testing -app = FastAPI() -app.include_router(router) - - -# Mock dependencies -async def mock_get_current_user(): - user = MagicMock(spec=User) - user.id = "admin-user" - user.is_super_user = True - return user - - -async def mock_get_db(): - yield AsyncMock() - - -@pytest.fixture -def client(): - # Create a fresh app for each test to avoid pollution - test_app = FastAPI() - test_app.include_router(router) - - # Import the exact dependency function object to override - from app.common.dependencies import get_current_user - - # helper to override - test_app.dependency_overrides[get_current_user] = mock_get_current_user - test_app.dependency_overrides[get_db] = mock_get_db - - with TestClient(test_app) as c: - yield c - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_list_sandboxes(mock_service_cls, client): - # Setup - mock_db = AsyncMock() - # update override for this specific test - client.app.dependency_overrides[get_db] = lambda: mock_db - - from datetime import datetime - - # Mock DB execution - sandbox_item = MagicMock() - sandbox_item.id = "sandbox-1" - sandbox_item.user_id = "user-1" - sandbox_item.status = "running" - sandbox_item.image = "img" - sandbox_item.created_at = datetime(2023, 1, 1) - sandbox_item.updated_at = datetime(2023, 1, 1) - sandbox_item.idle_timeout = 300 - sandbox_item.runtime = "runc" - sandbox_item.container_id = "cid" - sandbox_item.last_active_at = None - sandbox_item.error_message = None - sandbox_item.cpu_limit = 1.0 - sandbox_item.memory_limit = 512 - - sandbox_item.user.name = "Test User" - sandbox_item.user.email = "test@example.com" - - # scalars().all() returns list - # scalar_one() returns int - mock_count_result = MagicMock() - mock_count_result.scalar_one.return_value = 1 - - mock_list_result = MagicMock() - mock_list_result.scalars.return_value.all.return_value = [sandbox_item] - - mock_db.execute.side_effect = [mock_count_result, mock_list_result] - - # Run - response = client.get("/v1/sandboxes") - - # Verify - assert response.status_code == 200 - data = response.json() - assert data["total"] == 1 - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_stop_sandbox(mock_service_cls, client): - # Setup - mock_service = mock_service_cls.return_value - mock_service.stop_sandbox = AsyncMock(return_value=True) - - # Run - response = client.post("/v1/sandboxes/sb-1/stop") - - # Verify - assert response.status_code == 200 - assert response.json()["success"] is True - mock_service.stop_sandbox.assert_called_once_with("sb-1") - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_stop_sandbox_not_found(mock_service_cls, client): - # Setup - mock_service = 
mock_service_cls.return_value - mock_service.stop_sandbox = AsyncMock(return_value=False) - - # Run - response = client.post("/v1/sandboxes/sb-unknown/stop") - - # Verify - assert response.status_code == 404 - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_rebuild_sandbox(mock_service_cls, client): - mock_service = mock_service_cls.return_value - mock_service.rebuild_sandbox = AsyncMock(return_value=True) - response = client.post("/v1/sandboxes/sb-1/rebuild") - assert response.status_code == 200 - mock_service.rebuild_sandbox.assert_called_once_with("sb-1") - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_rebuild_sandbox_not_found(mock_service_cls, client): - mock_service = mock_service_cls.return_value - mock_service.rebuild_sandbox = AsyncMock(return_value=False) - response = client.post("/v1/sandboxes/sb-unknown/rebuild") - assert response.status_code == 404 - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_update_sandbox(mock_service_cls, client): - mock_service = mock_service_cls.return_value - mock_service.update_sandbox_config = AsyncMock(return_value=True) - response = client.patch("/v1/sandboxes/sb-1", json={"image": "python:3.11-slim"}) - assert response.status_code == 200 - mock_service.update_sandbox_config.assert_called_once_with("sb-1", image="python:3.11-slim") - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_update_sandbox_not_found(mock_service_cls, client): - mock_service = mock_service_cls.return_value - mock_service.update_sandbox_config = AsyncMock(return_value=False) - response = client.patch("/v1/sandboxes/sb-unknown", json={"image": "python:3.11-slim"}) - assert response.status_code == 404 - - -@patch("app.api.v1.sandboxes.SandboxManagerService") -def test_delete_sandbox(mock_service_cls, client): - # Setup - mock_service = mock_service_cls.return_value - mock_service.delete_sandbox = AsyncMock(return_value=True) - - # Run - response = client.delete("/v1/sandboxes/sb-1") - - # Verify - assert response.status_code == 200 - mock_service.delete_sandbox.assert_called_once_with("sb-1") diff --git a/backend/tests/backends/__init__.py b/backend/tests/backends/__init__.py deleted file mode 100644 index 09d41c4f4..000000000 --- a/backend/tests/backends/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Backend tests package diff --git a/backend/tests/backends/test_file_tracking_proxy.py b/backend/tests/backends/test_file_tracking_proxy.py deleted file mode 100644 index 17155072d..000000000 --- a/backend/tests/backends/test_file_tracking_proxy.py +++ /dev/null @@ -1,111 +0,0 @@ -from unittest.mock import MagicMock - -from deepagents.backends.protocol import EditResult, FileUploadResponse, WriteResult - -from app.core.agent.backends.file_tracking_proxy import FileTrackingProxy -from app.utils.file_event_emitter import FileEventEmitter - - -def _make_mock_backend(): - backend = MagicMock() - backend.id = "test-sandbox" - backend.is_started.return_value = True - return backend - - -def test_write_success_emits_event(): - backend = _make_mock_backend() - backend.write.return_value = WriteResult(path="/app/hello.py", files_update=None) - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - result = proxy.write("/app/hello.py", "print('hi')") - assert result.path == "/app/hello.py" - events = emitter.drain() - assert len(events) == 1 - assert events[0].action == "write" - assert events[0].path == "/app/hello.py" - assert events[0].size == len("print('hi')".encode("utf-8")) - - -def 
test_write_error_does_not_emit(): - backend = _make_mock_backend() - backend.write.return_value = WriteResult(error="File exists") - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - result = proxy.write("/app/hello.py", "x") - assert result.error - assert emitter.drain() == [] - - -def test_edit_success_emits_event(): - backend = _make_mock_backend() - backend.edit.return_value = EditResult(path="/app/hello.py", files_update=None, occurrences=1) - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - result = proxy.edit("/app/hello.py", "old", "new") - assert result.path == "/app/hello.py" - events = emitter.drain() - assert len(events) == 1 - assert events[0].action == "edit" - - -def test_write_overwrite_emits_write(): - backend = _make_mock_backend() - backend.write_overwrite.return_value = WriteResult(path="/app/a.py", files_update=None) - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - proxy.write_overwrite("/app/a.py", "content") - events = emitter.drain() - assert events[0].action == "write" - - -def test_upload_files_emits_per_file(): - backend = _make_mock_backend() - backend.upload_files.return_value = [ - FileUploadResponse(path="/app/a.py", error=None), - FileUploadResponse(path="/app/b.py", error="fail"), - ] - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - proxy.upload_files([("/app/a.py", b"aa"), ("/app/b.py", b"bb")]) - events = emitter.drain() - assert len(events) == 1 - assert events[0].path == "/app/a.py" - - -def test_read_delegates_without_emit(): - backend = _make_mock_backend() - backend.read.return_value = "file content" - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - result = proxy.read("/app/hello.py") - assert result == "file content" - assert emitter.drain() == [] - backend.read.assert_called_once_with("/app/hello.py") - - -def test_raw_read_delegates_without_emit(): - backend = _make_mock_backend() - backend.raw_read.return_value = "raw file content" - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - result = proxy.raw_read("/app/hello.py") - assert result == "raw file content" - assert emitter.drain() == [] - backend.raw_read.assert_called_once_with("/app/hello.py") - - -def test_getattr_fallback(): - backend = _make_mock_backend() - backend.some_new_method.return_value = "ok" - emitter = FileEventEmitter() - proxy = FileTrackingProxy(backend, emitter) - - assert proxy.some_new_method() == "ok" diff --git a/backend/tests/backends/test_pydantic_adapter.py b/backend/tests/backends/test_pydantic_adapter.py deleted file mode 100644 index fb34fb973..000000000 --- a/backend/tests/backends/test_pydantic_adapter.py +++ /dev/null @@ -1,452 +0,0 @@ -"""Tests for PydanticSandboxAdapter and RuntimeConfig. 
- -These tests verify the new features added to pydantic_adapter.py: -- RuntimeConfig data class -- BUILTIN_RUNTIMES predefined configurations -- _resolve_runtime() method -- Extended constructor parameters (runtime, session_id, idle_timeout, volumes) -""" - -import pytest - -from app.core.agent.backends.pydantic_adapter import ( - BUILTIN_RUNTIMES, - PydanticSandboxAdapter, - RuntimeConfig, - get_builtin_runtime, - list_builtin_runtimes, -) -from app.core.agent.backends.runtime_config import resolve_runtime - - -class TestRuntimeConfig: - """Tests for RuntimeConfig dataclass.""" - - def test_create_with_defaults(self): - """Test creating RuntimeConfig with default values.""" - config = RuntimeConfig(name="test") - assert config.name == "test" - assert config.base_image == "python:3.12-slim" - assert config.packages == [] - assert config.setup_commands == [] - assert config.env_vars == {} - - def test_create_with_all_parameters(self): - """Test creating RuntimeConfig with all parameters.""" - config = RuntimeConfig( - name="custom-ml", - base_image="python:3.11-slim", - packages=["torch", "numpy"], - setup_commands=["pip install --upgrade pip"], - env_vars={"CUDA_VISIBLE_DEVICES": "0"}, - ) - assert config.name == "custom-ml" - assert config.base_image == "python:3.11-slim" - assert config.packages == ["torch", "numpy"] - assert config.setup_commands == ["pip install --upgrade pip"] - assert config.env_vars == {"CUDA_VISIBLE_DEVICES": "0"} - - def test_to_pydantic_runtime(self): - """Test conversion to pydantic-ai-backend RuntimeConfig.""" - config = RuntimeConfig( - name="test", - base_image="python:3.12", - packages=["requests"], - ) - # to_pydantic_runtime returns self if pydantic RuntimeConfig is not available - result = config.to_pydantic_runtime() - # Should either return the same config or a PydanticRuntimeConfig - assert hasattr(result, "name") - assert result.name == "test" - - -class TestBuiltinRuntimes: - """Tests for BUILTIN_RUNTIMES and related functions.""" - - def test_builtin_runtimes_exist(self): - """Test that BUILTIN_RUNTIMES dictionary is populated.""" - assert len(BUILTIN_RUNTIMES) > 0 - assert "python-minimal" in BUILTIN_RUNTIMES - assert "python-datascience" in BUILTIN_RUNTIMES - assert "python-web" in BUILTIN_RUNTIMES - - def test_python_minimal_runtime(self): - """Test python-minimal runtime configuration.""" - runtime = BUILTIN_RUNTIMES["python-minimal"] - assert runtime.name == "python-minimal" - assert runtime.base_image == "python:3.12-slim" - assert runtime.packages == [] - - def test_python_datascience_runtime(self): - """Test python-datascience runtime configuration.""" - runtime = BUILTIN_RUNTIMES["python-datascience"] - assert runtime.name == "python-datascience" - assert "pandas" in runtime.packages - assert "numpy" in runtime.packages - assert "matplotlib" in runtime.packages - assert "scikit-learn" in runtime.packages - - def test_python_web_runtime(self): - """Test python-web runtime configuration.""" - runtime = BUILTIN_RUNTIMES["python-web"] - assert runtime.name == "python-web" - assert "fastapi" in runtime.packages - assert "uvicorn" in runtime.packages - assert "httpx" in runtime.packages - - def test_python_ml_runtime(self): - """Test python-ml runtime configuration.""" - runtime = BUILTIN_RUNTIMES["python-ml"] - assert runtime.name == "python-ml" - assert "torch" in runtime.packages - assert "transformers" in runtime.packages - - def test_node_minimal_runtime(self): - """Test node-minimal runtime configuration.""" - runtime = 
BUILTIN_RUNTIMES["node-minimal"] - assert runtime.name == "node-minimal" - assert runtime.base_image == "node:20-slim" - assert runtime.packages == [] - - def test_node_react_runtime(self): - """Test node-react runtime configuration.""" - runtime = BUILTIN_RUNTIMES["node-react"] - assert runtime.name == "node-react" - assert runtime.base_image == "node:20-slim" - assert "react" in runtime.packages - assert "typescript" in runtime.packages - - def test_get_builtin_runtime_exists(self): - """Test get_builtin_runtime with existing runtime.""" - runtime = get_builtin_runtime("python-datascience") - assert runtime is not None - assert runtime.name == "python-datascience" - - def test_get_builtin_runtime_not_exists(self): - """Test get_builtin_runtime with non-existing runtime.""" - runtime = get_builtin_runtime("non-existent") - assert runtime is None - - def test_list_builtin_runtimes(self): - """Test list_builtin_runtimes function.""" - runtimes = list_builtin_runtimes() - assert isinstance(runtimes, list) - assert "python-minimal" in runtimes - assert "python-datascience" in runtimes - assert "python-web" in runtimes - assert "python-ml" in runtimes - assert "node-minimal" in runtimes - assert "node-react" in runtimes - - -class TestResolveRuntime: - """Tests for resolve_runtime function.""" - - def test_resolve_none_runtime(self): - """Test resolving None runtime uses default image.""" - image, config = resolve_runtime( - "python:3.12-slim", - None, - ) - assert image == "python:3.12-slim" - assert config is None - - def test_resolve_builtin_runtime_string(self): - """Test resolving builtin runtime by name.""" - image, config = resolve_runtime( - "default:image", - "python-datascience", - ) - assert image == "python:3.12-slim" - assert config is not None - assert config.name == "python-datascience" - assert "pandas" in config.packages - - def test_resolve_image_string_with_colon(self): - """Test resolving image string (contains ':').""" - image, config = resolve_runtime( - "default:image", - "custom/image:v1.0", - ) - assert image == "custom/image:v1.0" - assert config is None - - def test_resolve_image_string_with_slash(self): - """Test resolving image string (contains '/').""" - image, config = resolve_runtime( - "default:image", - "docker.io/library/python", - ) - assert image == "docker.io/library/python" - assert config is None - - def test_resolve_custom_runtime_config(self): - """Test resolving custom RuntimeConfig instance.""" - custom_config = RuntimeConfig( - name="custom", - base_image="python:3.11", - packages=["custom-package"], - ) - image, config = resolve_runtime( - "default:image", - custom_config, - ) - assert image == "python:3.11" - assert config is not None - assert config.name == "custom" - assert "custom-package" in config.packages - - def test_resolve_unknown_string_as_image(self): - """Test resolving unknown string (not builtin, no special chars).""" - image, config = resolve_runtime( - "default:image", - "unknown-runtime", - ) - # Should treat as image name (with warning logged) - assert image == "unknown-runtime" - assert config is None - - -class TestPydanticSandboxAdapterInit: - """Tests for PydanticSandboxAdapter initialization (without Docker). - - These tests verify constructor parameter handling without actually - creating Docker containers. 
- """ - - def test_import_error_without_backend(self): - """Test ImportError is raised when pydantic-ai-backend is not available.""" - pytest.skip("pydantic-ai-backend[docker] is a required dependency; skipping missing-dependency branch test") - - -class TestPydanticSandboxAdapterProperties: - """Tests for PydanticSandboxAdapter instance properties. - - These tests use mocking to avoid creating actual Docker containers. - """ - - @pytest.fixture - def mock_docker_sandbox(self, monkeypatch): - """Mock DockerSandbox to avoid creating actual containers.""" - from unittest.mock import MagicMock - - mock_sandbox = MagicMock() - mock_sandbox.start = MagicMock() - mock_sandbox.stop = MagicMock() - monkeypatch.setattr( - "app.core.agent.backends.pydantic_adapter.DockerSandbox", - MagicMock(return_value=mock_sandbox), - ) - return mock_sandbox - - def test_adapter_stores_runtime_config(self, mock_docker_sandbox): - """Test that adapter stores runtime configuration.""" - adapter = PydanticSandboxAdapter(runtime="python-datascience") - config = adapter.get_runtime_config() - assert config is not None - assert config.name == "python-datascience" - - def test_adapter_stores_session_id(self, mock_docker_sandbox): - """Test that adapter stores session_id.""" - adapter = PydanticSandboxAdapter(session_id="test-session-123") - assert adapter.session_id == "test-session-123" - assert adapter.id == "test-session-123" - - def test_adapter_stores_idle_timeout(self, mock_docker_sandbox): - """Test that adapter stores idle_timeout.""" - adapter = PydanticSandboxAdapter(idle_timeout=1800) - assert adapter.idle_timeout == 1800 - - def test_adapter_stores_volumes(self, mock_docker_sandbox): - """Test that adapter stores volumes.""" - volumes = {"/host/data": "/container/data"} - adapter = PydanticSandboxAdapter(volumes=volumes) - assert adapter.volumes == volumes - - def test_adapter_image_from_runtime(self, mock_docker_sandbox): - """Test that adapter uses image from runtime config.""" - adapter = PydanticSandboxAdapter(runtime="node-minimal") - assert adapter.image == "node:20-slim" - - -class TestPydanticSandboxAdapterFileOps: - """Tests for PydanticSandboxAdapter file operations. - - These tests verify that read/write/edit methods properly delegate - to the upstream DockerSandbox methods. 
- """ - - @pytest.fixture - def adapter_with_mock(self, monkeypatch): - """Create adapter with mocked DockerSandbox.""" - from unittest.mock import MagicMock - - mock_sandbox = MagicMock() - mock_sandbox.start = MagicMock() - mock_sandbox.stop = MagicMock() - - # Mock execute for file existence check (file doesn't exist by default) - mock_sandbox.execute = MagicMock(return_value=MagicMock(output="", exit_code=1)) - - # Patch DockerSandbox constructor - monkeypatch.setattr( - "app.core.agent.backends.pydantic_adapter.DockerSandbox", - MagicMock(return_value=mock_sandbox), - ) - - adapter = PydanticSandboxAdapter() - return adapter, mock_sandbox - - def test_read_delegates_to_sandbox(self, adapter_with_mock): - """Test that read() delegates to self._sandbox.read().""" - adapter, mock_sandbox = adapter_with_mock - mock_sandbox.read = lambda path, offset, limit: "line1\nline2\nline3" - - result = adapter.read("/workspace/test.txt", offset=0, limit=10) - - # Should return formatted content with line numbers - assert "line1" in result - assert "line2" in result - - def test_raw_read_returns_unformatted_content(self, adapter_with_mock): - """Test that raw_read() returns original text without line numbers.""" - adapter, mock_sandbox = adapter_with_mock - mock_sandbox.read = lambda path, offset, limit: "line1\nline2\nline3" - - result = adapter.raw_read("/workspace/test.txt", offset=0, limit=10) - - assert result == "line1\nline2\nline3" - - def test_read_handles_error_from_sandbox(self, adapter_with_mock): - """Test that read() handles errors from upstream.""" - adapter, mock_sandbox = adapter_with_mock - mock_sandbox.read = lambda path, offset, limit: "[Error: File not found]" - - result = adapter.read("/workspace/nonexistent.txt") - - assert "Error" in result - - def test_write_delegates_to_sandbox(self, adapter_with_mock): - """Test that write() delegates to self._sandbox.write().""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - mock_write_result = MagicMock(error=None, path="/workspace/test.txt") - mock_sandbox.write = MagicMock(return_value=mock_write_result) - - result = adapter.write("/workspace/test.txt", "content") - - mock_sandbox.write.assert_called_once_with("/workspace/test.txt", "content") - assert result.path == "/workspace/test.txt" - assert result.error is None - - def test_write_prevents_overwrite_existing(self, adapter_with_mock): - """Test that write() prevents overwriting existing files.""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - # Simulate file exists (exit_code=0) - mock_sandbox.execute = MagicMock(return_value=MagicMock(output="", exit_code=0)) - - result = adapter.write("/workspace/existing.txt", "new content") - - assert result.error is not None - assert "already exists" in result.error - - def test_write_overwrite_delegates_to_sandbox(self, adapter_with_mock): - """Test that write_overwrite() delegates to self._sandbox.write().""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - mock_write_result = MagicMock(error=None, path="/workspace/test.txt") - mock_sandbox.write = MagicMock(return_value=mock_write_result) - - result = adapter.write_overwrite("/workspace/test.txt", "new content") - - mock_sandbox.write.assert_called_once_with("/workspace/test.txt", "new content") - assert result.path == "/workspace/test.txt" - assert result.error is None - - def test_edit_delegates_to_sandbox(self, adapter_with_mock): - """Test that edit() delegates to 
self._sandbox.edit().""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - mock_edit_result = MagicMock(error=None, path="/workspace/test.txt", occurrences=2) - mock_sandbox.edit = MagicMock(return_value=mock_edit_result) - - result = adapter.edit("/workspace/test.txt", "old", "new", replace_all=True) - - mock_sandbox.edit.assert_called_once_with("/workspace/test.txt", "old", "new", True) - assert result.path == "/workspace/test.txt" - assert result.occurrences == 2 - assert result.error is None - - def test_edit_handles_error_from_sandbox(self, adapter_with_mock): - """Test that edit() handles errors from upstream.""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - mock_edit_result = MagicMock(error="String not found") - mock_sandbox.edit = MagicMock(return_value=mock_edit_result) - - result = adapter.edit("/workspace/test.txt", "not found", "new") - - assert result.error == "String not found" - - def test_write_handles_special_characters(self, adapter_with_mock): - """Test that write() handles special characters in content.""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - mock_write_result = MagicMock(error=None, path="/workspace/test.txt") - mock_sandbox.write = MagicMock(return_value=mock_write_result) - - # Content with special characters that would break shell commands - special_content = "echo 'hello'\n$VAR\n`command`\n\"quotes\"\n!#$%&" - - result = adapter.write("/workspace/test.txt", special_content) - - # Should delegate to upstream without modification - mock_sandbox.write.assert_called_once_with("/workspace/test.txt", special_content) - assert result.error is None - - def test_write_handles_large_content(self, adapter_with_mock): - """Test that write() handles large content (> 1MB).""" - from unittest.mock import MagicMock - - adapter, mock_sandbox = adapter_with_mock - mock_write_result = MagicMock(error=None, path="/workspace/large.txt") - mock_sandbox.write = MagicMock(return_value=mock_write_result) - - # Large content (1.5 MB) - would fail with base64+echo approach - large_content = "x" * (1024 * 1024 + 500000) # 1.5 MB - - result = adapter.write("/workspace/large.txt", large_content) - - # Should delegate to upstream without size issues - mock_sandbox.write.assert_called_once() - assert result.error is None - - -class TestSandboxFactoryDockerSandbox: - """Tests for create_docker_sandbox factory function.""" - - def test_import_create_docker_sandbox(self): - """Test that create_docker_sandbox can be imported.""" - from app.core.tools.sandbox.sandbox_factory import create_docker_sandbox - - assert callable(create_docker_sandbox) - - def test_docker_in_sandbox_providers(self): - """Test that docker is in sandbox providers.""" - from app.core.tools.sandbox.sandbox_factory import get_available_sandbox_types - - types = get_available_sandbox_types() - assert "docker" in types - - def test_docker_working_dir(self): - """Test that docker working dir is /workspace.""" - from app.core.tools.sandbox.sandbox_factory import get_default_working_dir - - assert get_default_working_dir("docker") == "/workspace" diff --git a/backend/tests/core/__init__.py b/backend/tests/core/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/core/copilot/test_action_applier.py b/backend/tests/core/copilot/test_action_applier.py deleted file mode 100644 index e5d5aa525..000000000 --- a/backend/tests/core/copilot/test_action_applier.py +++ /dev/null @@ 
-1,111 +0,0 @@ -""" -Contract tests for apply_actions_to_graph_state. - -Uses shared fixtures from docs/schemas/copilot-apply-fixtures.json to ensure -backend apply logic stays consistent with the contract (and with frontend ActionProcessor). -""" - -import importlib.util -import json -from pathlib import Path - -import pytest - -# Import only action_applier to avoid pulling in langchain/copilot agent dependencies -_action_applier_path = ( - Path(__file__).resolve().parent.parent.parent.parent / "app" / "core" / "copilot" / "action_applier.py" -) -_spec = importlib.util.spec_from_file_location("action_applier", _action_applier_path) -_action_applier = importlib.util.module_from_spec(_spec) -_spec.loader.exec_module(_action_applier) -apply_actions_to_graph_state = _action_applier.apply_actions_to_graph_state - -FIXTURES_PATH = ( - Path(__file__).resolve().parent.parent.parent.parent.parent / "docs" / "schemas" / "copilot-apply-fixtures.json" -) - - -def _load_fixtures(): - if not FIXTURES_PATH.exists(): - return [] - with open(FIXTURES_PATH, encoding="utf-8") as f: - return json.load(f) - - -APPLY_FIXTURES = _load_fixtures() - - -def _normalize_nodes(nodes): - """Sort nodes by id for stable comparison.""" - return sorted(nodes, key=lambda n: n.get("id", "")) - - -def _normalize_edges(edges): - """Sort edges by id for stable comparison.""" - return sorted(edges, key=lambda e: e.get("id", "")) - - -def _node_contract_match(got_node, want_node): - """Check that got_node matches contract: id, type, position, data.label, data.type, and config superset.""" - if got_node.get("id") != want_node.get("id"): - return False - if got_node.get("type") != want_node.get("type"): - return False - if got_node.get("position") != want_node.get("position"): - return False - got_data = got_node.get("data") or {} - want_data = want_node.get("data") or {} - if got_data.get("label") != want_data.get("label"): - return False - if got_data.get("type") != want_data.get("type"): - return False - want_config = want_data.get("config") or {} - got_config = got_data.get("config") or {} - for k, v in want_config.items(): - if got_config.get(k) != v: - return False - return True - - -def _edge_contract_match(got_edge, want_edge): - """Check that got_edge matches contract: id, source, target.""" - return ( - got_edge.get("id") == want_edge.get("id") - and got_edge.get("source") == want_edge.get("source") - and got_edge.get("target") == want_edge.get("target") - ) - - -@pytest.mark.parametrize( - "case_index", - range(len(APPLY_FIXTURES)) if APPLY_FIXTURES else [0], - ids=[APPLY_FIXTURES[i]["name"] for i in range(len(APPLY_FIXTURES))] if APPLY_FIXTURES else ["no_fixtures"], -) -def test_apply_actions_contract(case_index): - """Each fixture case: apply actions and assert result matches expected (contract match).""" - if not APPLY_FIXTURES: - pytest.skip(f"Fixtures not found: {FIXTURES_PATH}") - data = APPLY_FIXTURES[case_index] - name = data.get("name", f"case_{case_index}") - initial_nodes = data["initial_nodes"] - initial_edges = data["initial_edges"] - actions = data["actions"] - expected_nodes = data["expected_nodes"] - expected_edges = data["expected_edges"] - - got_nodes, got_edges = apply_actions_to_graph_state( - [n.copy() for n in initial_nodes], - [e.copy() for e in initial_edges], - actions, - ) - got_nodes = _normalize_nodes(got_nodes) - got_edges = _normalize_edges(got_edges) - exp_nodes = _normalize_nodes(expected_nodes) - exp_edges = _normalize_edges(expected_edges) - - assert len(got_nodes) == len(exp_nodes), 
f"{name}: node count mismatch" - assert len(got_edges) == len(exp_edges), f"{name}: edge count mismatch" - for i, (g, e) in enumerate(zip(got_nodes, exp_nodes)): - assert _node_contract_match(g, e), f"{name}: node[{i}] contract mismatch: got {g}" - for i, (g, e) in enumerate(zip(got_edges, exp_edges)): - assert _edge_contract_match(g, e), f"{name}: edge[{i}] contract mismatch: got {g}" diff --git a/backend/tests/core/graph/__init__.py b/backend/tests/core/graph/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/core/graph/test_runtime_prompt_template.py b/backend/tests/core/graph/test_runtime_prompt_template.py deleted file mode 100644 index a21e068bf..000000000 --- a/backend/tests/core/graph/test_runtime_prompt_template.py +++ /dev/null @@ -1,120 +0,0 @@ -import sys -import types -import uuid -from pathlib import Path - -from app.models.graph import AgentGraph - - -def _import_runtime_renderer(): - graph_package = "app.core.graph" - root = Path(__file__).resolve().parents[3] - stub = types.ModuleType(graph_package) - stub.__path__ = [str(root / "app" / "core" / "graph")] - tracker = {} - tracker[graph_package] = sys.modules.get(graph_package) - tracker[f"{graph_package}.runtime_prompt_template"] = sys.modules.get(f"{graph_package}.runtime_prompt_template") - sys.modules[graph_package] = stub - try: - from app.core.graph.runtime_prompt_template import ( - build_runtime_prompt_context, - extract_runtime_template_variables, - get_prompt_text_from_config, - render_runtime_template, - ) - finally: - for name, original in tracker.items(): - if original is None: - sys.modules.pop(name, None) - else: - sys.modules[name] = original - return ( - render_runtime_template, - get_prompt_text_from_config, - extract_runtime_template_variables, - build_runtime_prompt_context, - ) - - -( - render_runtime_template, - get_prompt_text_from_config, - extract_runtime_template_variables, - build_runtime_prompt_context, -) = _import_runtime_renderer() - - -def test_known_placeholders_are_replaced(): - rendered = render_runtime_template( - "Thread {thread_id} repo {target_repo}", - {"thread_id": 42, "target_repo": "main"}, - ) - assert rendered == "Thread 42 repo main" - - -def test_missing_placeholders_stay_literal(): - rendered = render_runtime_template( - "Missing {nothing} still literal", - {}, - ) - assert rendered == "Missing {nothing} still literal" - - -def test_non_string_values_converted_none_untouched(): - rendered = render_runtime_template( - "Count {count} optional {optional}", - {"count": 7, "optional": None}, - ) - assert rendered == "Count 7 optional {optional}" - - -def test_mustache_placeholders_remain_when_context_has_key(): - rendered = render_runtime_template( - "{{mustache}}", - {"mustache": "X"}, - ) - assert rendered == "{{mustache}}" - - -def test_unsupported_placeholder_shapes_ignored(): - rendered = render_runtime_template( - "Shallow {user.id} {vars['name']} {vars[\"name\"]} {{mustache}}", - {"user": "irrelevant", "vars": {"name": "ignored"}}, - ) - assert rendered == "Shallow {user.id} {vars['name']} {vars[\"name\"]} {{mustache}}" - - -def test_none_input_returns_none(): - assert render_runtime_template(None, {"anything": "value"}) is None - - -def test_get_prompt_text_from_config_prefers_supported_keys(): - assert get_prompt_text_from_config({"systemPrompt": "A", "prompt": "B"}) == "A" - assert get_prompt_text_from_config({"system_prompt": "A", "prompt": "B"}) == "A" - assert get_prompt_text_from_config({"prompt": "B"}) == "B" - assert 
get_prompt_text_from_config({}) is None - - -def test_extract_runtime_template_variables_only_returns_supported_shapes(): - variables = extract_runtime_template_variables( - "T={thread_id} P={project} {{mustache}} {user.id} {vars['name']} {missing_key}" - ) - assert variables == {"thread_id", "project", "missing_key"} - - -def test_build_runtime_prompt_context_merges_built_ins_and_graph_context(): - graph = AgentGraph( - id=uuid.uuid4(), - name="Prompt Graph", - user_id="owner-user", - workspace_id=uuid.uuid4(), - variables={"context": {"thread_id": "override-thread", "project": "alpha"}}, - ) - - context = build_runtime_prompt_context(graph, user_id=uuid.uuid4(), thread_id="thread-123") - - assert context["thread_id"] == "override-thread" - assert context["project"] == "alpha" - assert context["graph_id"] == str(graph.id) - assert context["workspace_id"] == str(graph.workspace_id) - assert context["graph_name"] == "Prompt Graph" diff --git a/backend/tests/dynamic_engine/__init__.py b/backend/tests/dynamic_engine/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/dynamic_engine/mcp_server_test.py b/backend/tests/dynamic_engine/mcp_server_test.py deleted file mode 100644 index 4ab5d1283..000000000 --- a/backend/tests/dynamic_engine/mcp_server_test.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -MCP Client Demo using official MCP SDK - -Simple demonstration of using MCP client to connect to server and call tools. -""" - -import asyncio -import json -import sys -from datetime import timedelta -from pathlib import Path - -# Add backend to path -backend_path = Path(__file__).parent.parent.parent -sys.path.insert(0, str(backend_path)) - -from mcp import ClientSession # noqa: E402 -from mcp.client.sse import sse_client # noqa: E402 -from mcp.client.stdio import stdio_client # noqa: E402 -from mcp.client.streamable_http import streamablehttp_client # noqa: E402 - - -async def demo_stdio(): - """Demo: Connect to MCP server via stdio.""" - print("\n=== Demo 1: STDIO Transport ===") - - # Create stdio client - stdio_params = { - "command": "python", - "args": ["-m", "mcp.server"], # Replace with your MCP server command - } - - try: - async with stdio_client(**stdio_params) as streams: - async with ClientSession(streams[0], streams[1]) as session: - # Initialize session - await session.initialize() - - # List available tools - tools = await session.list_tools() - print(f"Available tools ({len(tools.tools)}):") - for tool in tools.tools[:10]: - print(f" - {tool.name}: {tool.description}") - - # Call a tool - if tools.tools: - result = await session.call_tool(tools.tools[0].name, {}) - print(f"\nTool result: {result}") - - except Exception as e: - print(f"Error: {e}") - - -async def demo_sse(host: str = "http://localhost:8000/sse"): - """Demo: Connect to MCP server via SSE.""" - print("\n=== Demo 2: SSE Transport ===") - - try: - async with sse_client(host) as streams: - async with ClientSession(streams[0], streams[1]) as session: - # Initialize session - await session.initialize() - - # List available tools - tools = await session.list_tools() - print(f"Available tools ({len(tools.tools)}):") - for tool in tools.tools[:10]: - print(f" - {tool.name}: {tool.description}") - - while True: - tool_name = input("Enter tool name: ") - input_json = input("Enter kwargs: ") - kwargs = {} - try: - kwargs = json.loads(input_json) - except Exception: - try: - kwargs = eval(input_json) - except Exception as e: - print(f"Error: {e}") - continue - result = await 
session.call_tool(tool_name, arguments=kwargs) - print(f"\nTool result: {result}") - - except Exception as e: - print(f"Error: {e}") - - -async def demo_streamable_http(): - """Demo: Connect to MCP server via Streamable HTTP.""" - print("\n=== Demo 3: Streamable HTTP Transport ===") - - try: - async with streamablehttp_client("http://localhost:8000/mcp") as streams: - async with ClientSession(streams[0], streams[1], read_timeout_seconds=timedelta(seconds=30)) as session: - # Initialize session - await session.initialize() - - # List available tools - tools = await session.list_tools() - print(f"Available tools ({len(tools.tools)}):") - for tool in tools.tools[:10]: - print(f" - {tool.name}") - - # Call a tool - result = await session.call_tool( - "execute_shell_command", {"command": "echo", "args": ["Hello from MCP!"]} - ) - print(f"\nTool result: {result}") - - except Exception as e: - print(f"Error: {e}") - - -async def demo_list_resources(): - """Demo: List resources from MCP server.""" - print("\n=== Demo 4: List Resources ===") - - try: - async with streamablehttp_client("http://localhost:8000/mcp") as streams: - async with ClientSession(streams[0], streams[1]) as session: - await session.initialize() - - # List resources - resources = await session.list_resources() - print(f"Available resources ({len(resources.resources)}):") - for resource in resources.resources[:5]: - print(f" - {resource.uri}: {resource.name}") - - except Exception as e: - print(f"Error: {e}") - - -async def demo_list_prompts(): - """Demo: List prompts from MCP server.""" - print("\n=== Demo 5: List Prompts ===") - - try: - async with streamablehttp_client("http://localhost:8000/mcp") as streams: - async with ClientSession(streams[0], streams[1]) as session: - await session.initialize() - - # List prompts - prompts = await session.list_prompts() - print(f"Available prompts ({len(prompts.prompts)}):") - for prompt in prompts.prompts[:5]: - print(f" - {prompt.name}: {prompt.description}") - - except Exception as e: - print(f"Error: {e}") - - -async def main(): - # Demo 2: SSE (requires MCP server on localhost:8000) - await demo_sse(host="http://localhost:9100/sse") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/backend/tests/services/__init__.py b/backend/tests/services/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/services/test_graph_service_runtime_prompt_cache.py b/backend/tests/services/test_graph_service_runtime_prompt_cache.py deleted file mode 100644 index 83ec60b13..000000000 --- a/backend/tests/services/test_graph_service_runtime_prompt_cache.py +++ /dev/null @@ -1,130 +0,0 @@ -import importlib -import sys -import types -import uuid -from datetime import datetime, timezone - -from app.models.graph import AgentGraph - - -def _import_with_optional_dependency_stubs(module_name: str): - module_names = ("langchain_google_genai", "pydantic_ai_backends") - previous_modules = {name: sys.modules.get(name) for name in module_names} - modules_before_import = set(sys.modules) - - for name in module_names: - sys.modules.pop(name, None) - - try: - genai_stub = types.ModuleType("langchain_google_genai") - - class _ChatGoogleGenerativeAI: - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - genai_stub.ChatGoogleGenerativeAI = _ChatGoogleGenerativeAI - sys.modules["langchain_google_genai"] = genai_stub - - backends_stub = types.ModuleType("pydantic_ai_backends") - - class _DockerSandbox: - def __init__(self, *args, **kwargs): - 
self.args = args - self.kwargs = kwargs - - class _RuntimeConfig: - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - backends_stub.DockerSandbox = _DockerSandbox - backends_stub.RuntimeConfig = _RuntimeConfig - sys.modules["pydantic_ai_backends"] = backends_stub - - imported_module = importlib.import_module(module_name) - return imported_module - finally: - new_modules = set(sys.modules) - modules_before_import - for name in new_modules: - if name.startswith("app."): - sys.modules.pop(name, None) - for name, previous in previous_modules.items(): - if previous is None: - sys.modules.pop(name, None) - else: - sys.modules[name] = previous - - -_graph_service = _import_with_optional_dependency_stubs("app.services.graph_service") -_build_runtime_aware_compile_cache_key = _graph_service._build_runtime_aware_compile_cache_key - - -def _make_graph(*, graph_id: uuid.UUID, context: dict) -> AgentGraph: - graph = AgentGraph( - id=graph_id, - name="Runtime Cache Graph", - user_id="owner-user", - variables={"context": context}, - ) - graph.updated_at = datetime(2026, 1, 1, tzinfo=timezone.utc) - return graph - - -def test_cache_key_changes_when_thread_id_changes() -> None: - graph = _make_graph(graph_id=uuid.uuid4(), context={}) - - key1 = _build_runtime_aware_compile_cache_key(graph, user_id="user-1", thread_id="thread-1") - key2 = _build_runtime_aware_compile_cache_key(graph, user_id="user-1", thread_id="thread-2") - - assert key1 != key2 - - -def test_cache_key_changes_when_graph_context_override_changes() -> None: - graph_id = uuid.uuid4() - graph1 = _make_graph(graph_id=graph_id, context={"thread_id": "override-1"}) - graph2 = _make_graph(graph_id=graph_id, context={"thread_id": "override-2"}) - - key1 = _build_runtime_aware_compile_cache_key(graph1, user_id="user-1", thread_id="thread-raw") - key2 = _build_runtime_aware_compile_cache_key(graph2, user_id="user-1", thread_id="thread-raw") - - assert key1 != key2 - - -def test_cache_key_stable_for_equivalent_context_ordering() -> None: - graph_id = uuid.uuid4() - graph1 = _make_graph( - graph_id=graph_id, - context={ - "project": "x", - "meta": {"alpha": 1, "beta": 2}, - }, - ) - graph2 = _make_graph( - graph_id=graph_id, - context={ - "meta": {"beta": 2, "alpha": 1}, - "project": "x", - }, - ) - - key1 = _build_runtime_aware_compile_cache_key(graph1, user_id="user-1", thread_id="thread-raw") - key2 = _build_runtime_aware_compile_cache_key(graph2, user_id="user-1", thread_id="thread-raw") - - assert key1 == key2 - - -def test_cache_key_repeated_execution_safe_for_runtime_context_changes() -> None: - graph = _make_graph(graph_id=uuid.uuid4(), context={}) - - first_key = _build_runtime_aware_compile_cache_key(graph, user_id="user-1", thread_id="thread-raw") - repeated_same_key = _build_runtime_aware_compile_cache_key(graph, user_id="user-1", thread_id="thread-raw") - assert repeated_same_key == first_key - - graph.variables = {"context": {"project": "alpha"}} - changed_context_key = _build_runtime_aware_compile_cache_key(graph, user_id="user-1", thread_id="thread-raw") - assert changed_context_key != first_key - - graph.variables = {"context": {"project": "beta"}} - changed_context_key_again = _build_runtime_aware_compile_cache_key(graph, user_id="user-1", thread_id="thread-raw") - assert changed_context_key_again != changed_context_key diff --git a/backend/tests/services/test_sandbox_manager.py b/backend/tests/services/test_sandbox_manager.py deleted file mode 100644 index 756509b6b..000000000 --- 
a/backend/tests/services/test_sandbox_manager.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -Tests for SandboxManagerService -""" - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from sqlalchemy.orm import configure_mappers - -# Import models to ensure mappers are registered -from app.services.sandbox_manager import SandboxManagerService - -# Ensure mappers are configured -configure_mappers() - - -@pytest.mark.asyncio -async def test_create_sandbox_record(): - # Setup mock db - mock_db_session = AsyncMock() - mock_db_session.add = MagicMock() - mock_db_session.commit = AsyncMock() - mock_db_session.refresh = AsyncMock() - - # Mock execute result for get_user_sandbox_record (returning None) - mock_result = MagicMock() - mock_result.scalar_one_or_none.return_value = None - mock_db_session.execute.return_value = mock_result - - service = SandboxManagerService(mock_db_session) - user_id = "test_user_1" - - # Run - # We mock UserSandbox constructor to avoid mapper issues if they persist - # But usually importing AuthUser should fix it. - - sandbox = await service.create_sandbox_record(user_id) - - # Verify - assert sandbox.user_id == user_id - assert sandbox.status == "pending" - mock_db_session.add.assert_called_once() - mock_db_session.commit.assert_awaited_once() - - -@pytest.mark.asyncio -async def test_ensure_sandbox_running_new(): - # Setup mocks - mock_db_session = AsyncMock() - mock_result = MagicMock() - # Return a mock record - mock_record = MagicMock() - mock_record.id = "sandbox-123" - mock_record.image = "python:3.12-slim" - mock_record.idle_timeout = 3600 - mock_record.status = "pending" - mock_result.scalar_one_or_none.return_value = mock_record - mock_db_session.execute.return_value = mock_result - - with ( - patch("app.services.sandbox_manager._sandbox_pool") as mock_pool, - patch("app.services.sandbox_manager.PydanticSandboxAdapter") as mock_adapter_cls, - patch("os.makedirs") as mock_makedirs, - ): - # Configure mocks - mock_pool.get = AsyncMock(return_value=None) - mock_pool.put = AsyncMock() - - mock_adapter_instance = MagicMock() - mock_adapter_instance.is_started.return_value = True - mock_adapter_cls.return_value = mock_adapter_instance - - service = SandboxManagerService(mock_db_session) - user_id = "user-123" - - # Run - adapter = await service.ensure_sandbox_running(user_id) - - # Validations - assert adapter is not None - mock_adapter_cls.assert_called_once() - mock_pool.put.assert_called_once() - - # Verify volume creation - expected_dir = f"/tmp/sandboxes/{user_id}" - mock_makedirs.assert_called_once_with(expected_dir, exist_ok=True) - - # Verify adapter called with volumes and auto_remove=False for user sandbox - call_args = mock_adapter_cls.call_args - assert call_args.kwargs["volumes"] == {expected_dir: "/workspace"} - assert call_args.kwargs.get("auto_remove") is False - - # DB updates should happen - assert mock_db_session.execute.call_count >= 1 - - -@pytest.mark.asyncio -async def test_stop_sandbox(): - # Setup mocks - mock_db_session = AsyncMock() - mock_result = MagicMock() - # Configure rowcount for the update result - mock_result.rowcount = 1 - mock_db_session.execute.return_value = mock_result - mock_db_session.commit = AsyncMock() - - with patch("app.services.sandbox_manager._sandbox_pool") as mock_pool: - mock_pool.stop = AsyncMock() - - service = SandboxManagerService(mock_db_session) - sandbox_id = str(uuid.uuid4()) - - # Run - success = await service.stop_sandbox(sandbox_id) - - # Validations: stop_sandbox only 
stops container, does not remove from pool - assert success is True - mock_pool.stop.assert_called_once_with(sandbox_id) - mock_db_session.execute.assert_called() - mock_db_session.commit.assert_awaited() - - -@pytest.mark.asyncio -async def test_cleanup_idle_sandboxes(): - # Setup mocks - mock_db_session = AsyncMock() - mock_db_session.execute = AsyncMock() - mock_db_session.commit = AsyncMock() - - with patch("app.services.sandbox_manager._sandbox_pool") as mock_pool: - # Mock cleanup returning list of IDs - evicted_ids = ["sandbox-1", "sandbox-2"] - mock_pool.cleanup_idle = AsyncMock(return_value=evicted_ids) - - service = SandboxManagerService(mock_db_session) - - # Run - count = await service.cleanup_idle_sandboxes() - - # Verify - assert count == 2 - mock_pool.cleanup_idle.assert_called_once() - # Verify DB update called - mock_db_session.execute.assert_called_once() - mock_db_session.commit.assert_awaited_once() diff --git a/backend/tests/test_api/__init__.py b/backend/tests/test_api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/test_api/test_chat_commands_chat_run.py b/backend/tests/test_api/test_chat_commands_chat_run.py deleted file mode 100644 index 52423c3e7..000000000 --- a/backend/tests/test_api/test_chat_commands_chat_run.py +++ /dev/null @@ -1,55 +0,0 @@ -from app.websocket.chat_commands import ChatRunTurnCommand, build_command_from_parsed_frame -from app.websocket.chat_protocol import parse_client_frame - - -def test_chat_extension_produces_chat_run_turn_command(): - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-1", - "thread_id": "t-1", - "input": {"message": "hello"}, - "extension": {"kind": "chat", "run_id": "run-xyz"}, - "metadata": {}, - } - ) - command = build_command_from_parsed_frame(parsed) - assert isinstance(command, ChatRunTurnCommand) - assert command.run_id == "run-xyz" - assert command.message == "hello" - - -def test_no_extension_still_produces_standard_command(): - from app.websocket.chat_commands import StandardChatTurnCommand - - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-2", - "input": {"message": "hi"}, - "extension": None, - "metadata": {}, - } - ) - command = build_command_from_parsed_frame(parsed) - assert isinstance(command, StandardChatTurnCommand) - assert not isinstance(command, ChatRunTurnCommand) - - -def test_skill_creator_extension_still_produces_skill_creator_command(): - from app.websocket.chat_commands import SkillCreatorTurnCommand - - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-3", - "input": {"message": "create skill"}, - "extension": {"kind": "skill_creator", "run_id": "run-sc", "edit_skill_id": "sk-1"}, - "metadata": {}, - } - ) - command = build_command_from_parsed_frame(parsed) - assert isinstance(command, SkillCreatorTurnCommand) - assert not isinstance(command, ChatRunTurnCommand) - assert command.run_id == "run-sc" - assert command.edit_skill_id == "sk-1" diff --git a/backend/tests/test_api/test_chat_commands_copilot.py b/backend/tests/test_api/test_chat_commands_copilot.py deleted file mode 100644 index b78da28c9..000000000 --- a/backend/tests/test_api/test_chat_commands_copilot.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Tests for copilot command dispatch.""" - -from app.websocket.chat_commands import ( - ChatRunTurnCommand, - CopilotTurnCommand, - SkillCreatorTurnCommand, - build_command_from_parsed_frame, -) -from app.websocket.chat_protocol import ParsedChatInput, ParsedChatStartFrame, 
ParsedCopilotExtension - - -def test_copilot_extension_produces_copilot_turn_command(): - frame = ParsedChatStartFrame( - request_id="req-1", - thread_id=None, - graph_id=None, - input=ParsedChatInput(message="Build RAG", files=[], provider_name=None, model_name=None), - extension=ParsedCopilotExtension( - kind="copilot", - run_id="run-123", - graph_context={"nodes": [], "edges": []}, - conversation_history=[{"role": "user", "content": "hi"}], - mode="deepagents", - ), - metadata={}, - ) - cmd = build_command_from_parsed_frame(frame) - assert isinstance(cmd, CopilotTurnCommand) - assert cmd.run_id == "run-123" - assert cmd.graph_context == {"nodes": [], "edges": []} - assert cmd.conversation_history == [{"role": "user", "content": "hi"}] - assert cmd.mode == "deepagents" - assert cmd.message == "Build RAG" - - -def test_no_extension_still_standard(): - frame = ParsedChatStartFrame( - request_id="req-2", - thread_id=None, - graph_id=None, - input=ParsedChatInput(message="hello", files=[], provider_name=None, model_name=None), - extension=None, - metadata={}, - ) - cmd = build_command_from_parsed_frame(frame) - assert not isinstance(cmd, CopilotTurnCommand) - assert not isinstance(cmd, SkillCreatorTurnCommand) - - -def test_chat_extension_still_chat_run(): - from app.websocket.chat_protocol import ParsedChatExtension - - frame = ParsedChatStartFrame( - request_id="req-3", - thread_id=None, - graph_id=None, - input=ParsedChatInput(message="hello", files=[], provider_name=None, model_name=None), - extension=ParsedChatExtension(kind="chat", run_id="r1"), - metadata={}, - ) - cmd = build_command_from_parsed_frame(frame) - assert isinstance(cmd, ChatRunTurnCommand) diff --git a/backend/tests/test_api/test_chat_protocol.py b/backend/tests/test_api/test_chat_protocol.py deleted file mode 100644 index 396af48f1..000000000 --- a/backend/tests/test_api/test_chat_protocol.py +++ /dev/null @@ -1,153 +0,0 @@ -import uuid - -import pytest - -from app.websocket.chat_protocol import ( - ChatProtocolError, - ParsedChatStartFrame, - parse_client_frame, -) - - -def test_parse_standard_chat_start_frame(): - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-1", - "thread_id": None, - "graph_id": None, - "input": {"message": "hello", "files": []}, - "extension": None, - "metadata": {}, - } - ) - - assert isinstance(parsed, ParsedChatStartFrame) - assert parsed.request_id == "req-1" - assert parsed.input.message == "hello" - assert parsed.extension is None - - -def test_parse_skill_creator_extension_frame(): - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-2", - "input": {"message": "build a skill", "files": []}, - "extension": { - "kind": "skill_creator", - "run_id": "123e4567-e89b-12d3-a456-426614174000", - "edit_skill_id": "skill-42", - }, - "metadata": {}, - } - ) - - assert parsed.extension is not None - assert parsed.extension.kind == "skill_creator" - assert parsed.extension.edit_skill_id == "skill-42" - - -def test_parse_chat_start_frame_coerces_graph_id_to_uuid(): - graph_id = uuid.uuid4() - - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-graph", - "thread_id": None, - "graph_id": str(graph_id), - "input": {"message": "hello", "files": []}, - "extension": None, - "metadata": {}, - } - ) - - assert isinstance(parsed, ParsedChatStartFrame) - assert parsed.graph_id == graph_id - - -def test_reserved_metadata_control_keys_are_rejected(): - try: - parse_client_frame( - { - "type": "chat.start", - "request_id": "req-3", - 
"input": {"message": "hello"}, - "extension": None, - "metadata": {"mode": "apk-vulnerability"}, - } - ) - except ChatProtocolError as exc: - assert exc.message == "reserved metadata keys are not allowed" - assert exc.request_id == "req-3" - else: - raise AssertionError("expected ChatProtocolError") - - -def test_metadata_files_key_is_rejected_for_typed_chat_start(): - with pytest.raises(ChatProtocolError) as exc: - parse_client_frame( - { - "type": "chat.start", - "request_id": "req-files", - "input": {"message": "hello", "files": []}, - "extension": None, - "metadata": { - "files": [{"filename": "notes.md", "path": "/tmp/notes.md"}], - }, - } - ) - - assert exc.value.message == "reserved metadata keys are not allowed" - assert exc.value.request_id == "req-files" - - -def test_parse_ping_frame_passes_through(): - parsed = parse_client_frame({"type": "ping"}) - assert isinstance(parsed, dict) - assert parsed.get("type") == "ping" - - -def test_parse_chat_resume_and_stop_return_dicts(): - assert parse_client_frame({"type": "chat.resume", "request_id": "req-r"}).get("type") == "chat.resume" - assert parse_client_frame({"type": "chat.stop", "request_id": "req-s"}).get("type") == "chat.stop" - - -def test_legacy_chat_frame_is_rejected_as_unknown(): - with pytest.raises(ChatProtocolError) as exc: - parse_client_frame( - { - "type": "chat", - "request_id": "req-old", - "message": "hello", - "metadata": {"mode": "skill_creator"}, - } - ) - - assert "unknown frame type" in exc.value.message - - -def test_malformed_chat_start_missing_input_raises(): - with pytest.raises(ChatProtocolError) as exc: - parse_client_frame({"type": "chat.start", "request_id": "req-bad"}) - - assert "input" in exc.value.message.lower() - assert exc.value.request_id == "req-bad" - - -def test_invalid_graph_id_raises_protocol_error(): - with pytest.raises(ChatProtocolError) as exc: - parse_client_frame( - { - "type": "chat.start", - "request_id": "req-graph-bad", - "graph_id": "not-a-uuid", - "input": {"message": "hello", "files": []}, - "extension": None, - "metadata": {}, - } - ) - - assert exc.value.message == "chat.start frame graph_id must be a valid UUID" - assert exc.value.request_id == "req-graph-bad" diff --git a/backend/tests/test_api/test_chat_protocol_chat_extension.py b/backend/tests/test_api/test_chat_protocol_chat_extension.py deleted file mode 100644 index c04d97963..000000000 --- a/backend/tests/test_api/test_chat_protocol_chat_extension.py +++ /dev/null @@ -1,54 +0,0 @@ -from app.websocket.chat_protocol import ( - ParsedChatStartFrame, - parse_client_frame, -) - - -def test_parse_chat_extension_frame(): - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-chat-1", - "thread_id": "t-1", - "graph_id": None, - "input": {"message": "hello"}, - "extension": {"kind": "chat", "run_id": "run-abc"}, - "metadata": {}, - } - ) - assert isinstance(parsed, ParsedChatStartFrame) - assert parsed.extension is not None - assert parsed.extension.kind == "chat" - assert parsed.extension.run_id == "run-abc" - - -def test_parse_chat_extension_with_no_run_id(): - parsed = parse_client_frame( - { - "type": "chat.start", - "request_id": "req-chat-2", - "input": {"message": "hi"}, - "extension": {"kind": "chat"}, - "metadata": {}, - } - ) - assert parsed.extension is not None - assert parsed.extension.kind == "chat" - assert parsed.extension.run_id is None - - -def test_unsupported_extension_kind_still_rejected(): - import pytest - - from app.websocket.chat_protocol import ChatProtocolError - - with 
pytest.raises(ChatProtocolError, match="unsupported extension kind"): - parse_client_frame( - { - "type": "chat.start", - "request_id": "req-bad", - "input": {"message": "hi"}, - "extension": {"kind": "unknown_future_kind"}, - "metadata": {}, - } - ) diff --git a/backend/tests/test_api/test_chat_protocol_copilot_extension.py b/backend/tests/test_api/test_chat_protocol_copilot_extension.py deleted file mode 100644 index df71a1800..000000000 --- a/backend/tests/test_api/test_chat_protocol_copilot_extension.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Tests for copilot extension parsing in chat protocol.""" - -import pytest - -from app.websocket.chat_protocol import ChatProtocolError, parse_client_frame - - -def _make_copilot_frame(*, graph_context=None, mode=None, run_id=None, conversation_history=None): - return { - "type": "chat.start", - "request_id": "req-1", - "graph_id": "00000000-0000-0000-0000-000000000001", - "input": {"message": "Build a RAG pipeline"}, - "extension": { - "kind": "copilot", - "run_id": run_id, - "graph_context": graph_context if graph_context is not None else {"nodes": [], "edges": []}, - "conversation_history": conversation_history or [], - "mode": mode or "deepagents", - }, - } - - -def test_parse_copilot_extension_frame(): - result = parse_client_frame(_make_copilot_frame(run_id="run-abc")) - assert result.extension.kind == "copilot" - assert result.extension.run_id == "run-abc" - assert result.extension.graph_context == {"nodes": [], "edges": []} - assert result.extension.mode == "deepagents" - assert result.extension.conversation_history == [] - - -def test_parse_copilot_extension_defaults(): - """mode defaults to deepagents, conversation_history defaults to [].""" - frame = { - "type": "chat.start", - "request_id": "req-2", - "input": {"message": "test"}, - "extension": {"kind": "copilot", "graph_context": {"nodes": []}}, - } - result = parse_client_frame(frame) - assert result.extension.kind == "copilot" - assert result.extension.mode == "deepagents" - assert result.extension.conversation_history == [] - assert result.extension.run_id is None - - -def test_parse_copilot_extension_missing_graph_context(): - """graph_context is required — missing it should raise.""" - frame = { - "type": "chat.start", - "request_id": "req-3", - "input": {"message": "test"}, - "extension": {"kind": "copilot"}, - } - with pytest.raises(ChatProtocolError, match="graph_context"): - parse_client_frame(frame) - - -def test_existing_extensions_still_work(): - """Regression: skill_creator and chat extensions unchanged.""" - sc_frame = { - "type": "chat.start", - "request_id": "req-4", - "input": {"message": "test"}, - "extension": {"kind": "skill_creator", "run_id": "r1", "edit_skill_id": "s1"}, - } - result = parse_client_frame(sc_frame) - assert result.extension.kind == "skill_creator" - - chat_frame = { - "type": "chat.start", - "request_id": "req-5", - "input": {"message": "test"}, - "extension": {"kind": "chat", "run_id": "r2"}, - } - result = parse_client_frame(chat_frame) - assert result.extension.kind == "chat" diff --git a/backend/tests/test_api/test_chat_ws_handler.py b/backend/tests/test_api/test_chat_ws_handler.py deleted file mode 100644 index 149b7bdcc..000000000 --- a/backend/tests/test_api/test_chat_ws_handler.py +++ /dev/null @@ -1,561 +0,0 @@ -"""Tests for ChatWsHandler — focuses on critical frame-output paths.""" - -import asyncio -import json -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - -from app.websocket.chat_ws_handler import 
ChatTaskEntry, ChatWsHandler - -# --------------------------------------------------------------------------- -# Helpers -# --------------------------------------------------------------------------- - - -class MockWebSocket: - """Minimal WebSocket stub that records sent frames.""" - - def __init__(self): - self.sent: list[dict] = [] - self.closed = False - - async def send_text(self, data: str) -> None: - self.sent.append(json.loads(data)) - - async def close(self, code: int = 1000) -> None: - self.closed = True - - def frames_of_type(self, type_: str) -> list[dict]: - return [f for f in self.sent if f.get("type") == type_] - - -def make_handler(ws: MockWebSocket | None = None) -> tuple[ChatWsHandler, MockWebSocket]: - if ws is None: - ws = MockWebSocket() - handler = ChatWsHandler(user_id="user-123", websocket=ws) - return handler, ws - - -# --------------------------------------------------------------------------- -# Ping/pong -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_ping_returns_pong() -> None: - handler, ws = make_handler() - await handler._handle_frame(json.dumps({"type": "ping"})) - assert ws.frames_of_type("pong"), "expected a pong frame" - - -@pytest.mark.asyncio -async def test_typed_resume_routes_to_resume_handler() -> None: - handler, _ = make_handler() - with patch.object(handler, "_handle_resume", new_callable=AsyncMock) as mock_resume: - await handler._handle_frame( - json.dumps( - { - "type": "chat.resume", - "request_id": "req-resume", - "thread_id": "thread-typed", - "command": {}, - } - ) - ) - mock_resume.assert_awaited_once() - - -@pytest.mark.asyncio -async def test_typed_stop_routes_to_stop_handler() -> None: - handler, _ = make_handler() - with patch.object(handler, "_handle_stop", new_callable=AsyncMock) as mock_stop: - await handler._handle_frame(json.dumps({"type": "chat.stop", "request_id": "req-stop"})) - mock_stop.assert_awaited_once() - - -@pytest.mark.asyncio -async def test_malformed_chat_start_returns_protocol_error() -> None: - handler, ws = make_handler() - await handler._handle_frame(json.dumps({"type": "chat.start", "request_id": "req-bad"})) - - errors = ws.frames_of_type("ws_error") - assert errors, "malformed chat.start should send ws_error" - error = errors[0] - assert "input" in error.get("message", "").lower() - assert error.get("request_id") == "req-bad" - - -@pytest.mark.asyncio -async def test_protocol_error_does_not_register_task() -> None: - handler, ws = make_handler() - - with patch.object(handler._task_supervisor, "register", wraps=handler._task_supervisor.register) as mock_register: - await handler._handle_frame(json.dumps({"type": "chat.start", "request_id": "req-bad"})) - - errors = ws.frames_of_type("ws_error") - assert errors, "malformed frame must emit ws_error" - mock_register.assert_not_called() - - -# --------------------------------------------------------------------------- -# Duplicate request guard -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_duplicate_request_id_sends_ws_error() -> None: - handler, ws = make_handler() - # Pre-populate _tasks with a fake entry for the same request_id - fake_task = MagicMock(spec=asyncio.Task) - handler._tasks["req-1"] = ChatTaskEntry(thread_id=None, task=fake_task) - - frame = json.dumps( - { - "type": "chat.start", - "request_id": "req-1", - "input": {"message": "hello"}, - "metadata": {}, - } - ) - await handler._handle_frame(frame) - - 
errors = ws.frames_of_type("ws_error") - assert errors, "expected ws_error for duplicate request_id" - assert "duplicate" in errors[0].get("message", "") - - -@pytest.mark.asyncio -async def test_direct_task_insertion_keeps_thread_active_guard() -> None: - handler, ws = make_handler() - fake_task = MagicMock(spec=asyncio.Task) - handler._tasks["req-existing"] = ChatTaskEntry(thread_id="thread-1", task=fake_task) - - await handler._handle_frame( - json.dumps( - { - "type": "chat.start", - "request_id": "req-next", - "thread_id": "thread-1", - "input": {"message": "hello"}, - "metadata": {}, - } - ) - ) - - assert ws.frames_of_type("ws_error") == [ - { - "type": "ws_error", - "request_id": "req-next", - "message": "turn already in progress for thread_id", - } - ] - - -@pytest.mark.asyncio -async def test_legacy_chat_frame_is_rejected_before_task_creation() -> None: - handler, ws = make_handler() - - legacy_frame = json.dumps( - { - "type": "chat", - "request_id": "req-legacy", - "message": "build me a skill", - "metadata": { - "mode": "skill_creator", - "run_id": str(uuid.uuid4()), - "edit_skill_id": "legacy-skill", - }, - } - ) - - await handler._handle_frame(legacy_frame) - - assert ws.frames_of_type("ws_error") == [ - { - "type": "ws_error", - "request_id": "req-legacy", - "message": "legacy metadata control fields are no longer supported", - } - ] - assert "req-legacy" not in handler._tasks - - -# --------------------------------------------------------------------------- -# Command parsing edge cases -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_non_object_json_frame_returns_ws_error() -> None: - handler, ws = make_handler() - - await handler._handle_frame(json.dumps(["unexpected", "array"])) - - errors = ws.frames_of_type("ws_error") - assert errors, "non-object JSON should emit ws_error instead of crashing" - assert "object" in errors[0].get("message", "").lower() - - -@pytest.mark.asyncio -async def test_chat_start_with_invalid_extension_data_is_rejected() -> None: - handler, ws = make_handler() - - await handler._handle_frame( - json.dumps( - { - "type": "chat.start", - "request_id": "req-invalid", - "input": {"message": "hello"}, - "extension": {"kind": "unknown"}, - "metadata": {}, - } - ) - ) - - assert ws.frames_of_type("ws_error") == [ - { - "type": "ws_error", - "request_id": "req-invalid", - "message": "unsupported extension kind: unknown", - } - ] - assert "req-invalid" not in handler._tasks - - -@pytest.mark.asyncio -async def test_typed_skill_creator_extension_propagates_run_metadata() -> None: - handler, ws = make_handler() - captured_payload = None - run_id = uuid.uuid4() - - async def fake_run_chat_turn(*, request_id: str, payload) -> None: - nonlocal captured_payload - assert request_id == "req-skill" - captured_payload = payload - - frame = json.dumps( - { - "type": "chat.start", - "request_id": "req-skill", - "thread_id": None, - "input": {"message": "build a skill", "files": []}, - "extension": { - "kind": "skill_creator", - "run_id": str(run_id), - "edit_skill_id": "skill-42", - }, - "metadata": {}, - } - ) - - with patch.object(handler, "_run_chat_turn", side_effect=fake_run_chat_turn) as mock_run_chat_turn: - await handler._handle_frame(frame) - entry = handler._tasks["req-skill"] - await entry.task - - mock_run_chat_turn.assert_awaited_once() - assert captured_payload is not None - assert captured_payload.metadata["edit_skill_id"] == "skill-42" - assert handler._tasks["req-skill"].run_id 
== run_id - assert ws.sent == [] - - -@pytest.mark.asyncio -async def test_input_files_are_forwarded_into_metadata() -> None: - handler, ws = make_handler() - captured_payload = None - - async def fake_run_chat_turn(*, request_id: str, payload) -> None: - nonlocal captured_payload - assert request_id == "req-files" - captured_payload = payload - - files = [ - {"filename": "notes.md", "path": "/tmp/notes.md", "size": 10}, - {"filename": "plan.txt", "path": "/data/plan.txt", "size": 42}, - ] - frame = json.dumps( - { - "type": "chat.start", - "request_id": "req-files", - "thread_id": None, - "input": {"message": "see attached", "files": files}, - "extension": None, - "metadata": {"foo": "bar"}, - } - ) - - with patch.object(handler, "_run_chat_turn", side_effect=fake_run_chat_turn) as mock_run_chat_turn: - await handler._handle_frame(frame) - entry = handler._tasks["req-files"] - await entry.task - - mock_run_chat_turn.assert_awaited_once() - assert captured_payload is not None - assert captured_payload.metadata["files"] == files - assert captured_payload.metadata["foo"] == "bar" - assert ws.sent == [] - - -# --------------------------------------------------------------------------- -# _handle_stop: no-op for unknown request, cancels for known request -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_handle_stop_noop_for_unknown_request() -> None: - handler, ws = make_handler() - # Should not raise - await handler._handle_frame(json.dumps({"type": "chat.stop", "request_id": "unknown-req"})) - assert ws.sent == [], "stop for unknown request should send nothing" - - -@pytest.mark.asyncio -async def test_stop_by_request_id_cancels_turn_before_thread_assignment() -> None: - handler, _ = make_handler() - entered = asyncio.Event() - cancelled = asyncio.Event() - - async def fake_run_standard_turn(_command) -> None: - entered.set() - try: - await asyncio.sleep(60) - except asyncio.CancelledError: - cancelled.set() - raise - - with patch.object(handler._turn_executor, "run_standard_turn", side_effect=fake_run_standard_turn): - await handler._handle_frame( - json.dumps( - { - "type": "chat.start", - "request_id": "req-stop-early", - "input": {"message": "hello"}, - "extension": None, - "metadata": {}, - } - ) - ) - await entered.wait() - await handler._handle_frame(json.dumps({"type": "chat.stop", "request_id": "req-stop-early"})) - await asyncio.sleep(0) - - assert cancelled.is_set() - - -@pytest.mark.asyncio -async def test_handle_stop_cancels_known_task() -> None: - handler, ws = make_handler() - cancelled = False - - async def slow(): - nonlocal cancelled - try: - await asyncio.sleep(10) - except asyncio.CancelledError: - cancelled = True - raise - - task = asyncio.create_task(slow()) - await asyncio.sleep(0) - handler._tasks["req-stop"] = ChatTaskEntry(thread_id="thread-1", task=task) - - with patch("app.websocket.chat_ws_handler.task_manager") as mock_tm: - mock_tm.stop_task = AsyncMock() - await handler._handle_frame(json.dumps({"type": "chat.stop", "request_id": "req-stop"})) - mock_tm.stop_task.assert_awaited_once_with("thread-1") - - # Give event loop a tick to propagate cancellation - with pytest.raises(asyncio.CancelledError): - await task - assert cancelled - - -# --------------------------------------------------------------------------- -# _cancel_all_tasks: called on disconnect, cancels everything -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def 
test_cancel_all_tasks_on_disconnect() -> None: - """run() must call _cancel_all_tasks when WebSocketDisconnect is raised.""" - from fastapi import WebSocketDisconnect - - handler, ws = make_handler() - - done_flag = asyncio.Event() - - async def long_running(): - try: - await asyncio.sleep(60) - except asyncio.CancelledError: - done_flag.set() - raise - - task = asyncio.create_task(long_running()) - await asyncio.sleep(0) - handler._tasks["req-dc"] = ChatTaskEntry(thread_id=None, task=task) - - # Simulate disconnect: receive_text raises WebSocketDisconnect - ws_mock = MagicMock() - ws_mock.receive_text = AsyncMock(side_effect=WebSocketDisconnect(code=1001)) - handler.websocket = ws_mock - - with patch("app.websocket.chat_ws_handler.task_manager") as mock_tm: - mock_tm.stop_task = AsyncMock() - await handler.run() - - # Wait briefly for the task to be cancelled - await asyncio.sleep(0.05) - assert done_flag.is_set(), "long-running task should have been cancelled on disconnect" - - -# --------------------------------------------------------------------------- -# _run_chat_turn: done frame sent even when thread_id is None (early failure) -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_done_frame_sent_on_early_error_no_thread_id() -> None: - """ - If get_or_create_conversation raises before thread_id is assigned, - the handler must still send a done frame (not just an error frame). - """ - handler, ws = make_handler() - - from app.schemas.chat import ChatRequest - - payload = ChatRequest(message="hello", thread_id=None, graph_id=None, metadata={}) - - with ( - patch("app.websocket.chat_ws_handler.AsyncSessionLocal") as mock_session_cls, - patch("app.websocket.chat_ws_handler.get_or_create_conversation", side_effect=RuntimeError("db down")), - patch("app.websocket.chat_ws_handler._finalize_task_noop", create=True), - patch("app.websocket.chat_ws_handler.task_manager"), - ): - # Make AsyncSessionLocal return an async context manager - mock_cm = AsyncMock() - mock_cm.__aenter__ = AsyncMock(return_value=MagicMock()) - mock_cm.__aexit__ = AsyncMock(return_value=False) - mock_session_cls.return_value = mock_cm - - await handler._run_chat_turn(request_id="req-early", payload=payload) - - sent_types = [f["type"] for f in ws.sent] - assert "error" in sent_types, "error frame must be sent" - assert "done" in sent_types, "done frame must be sent even when thread_id is None" - - # done must come after error - assert sent_types.index("done") > sent_types.index("error") - - -# --------------------------------------------------------------------------- -# CancelledError: done frame sent before re-raising -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_cancelled_error_sends_done_frame() -> None: - """ - When the turn task is cancelled mid-stream, a done frame must be sent - before asyncio.CancelledError propagates. 
- """ - handler, ws = make_handler() - - from app.schemas.chat import ChatRequest - - payload = ChatRequest(message="hello", thread_id=None, graph_id=None, metadata={}) - - async def raise_cancelled(*args, **kwargs): - raise asyncio.CancelledError() - - with ( - patch("app.websocket.chat_ws_handler.AsyncSessionLocal") as mock_session_cls, - patch("app.websocket.chat_ws_handler.get_or_create_conversation", side_effect=raise_cancelled), - patch("app.websocket.chat_ws_handler.task_manager"), - ): - mock_cm = AsyncMock() - mock_cm.__aenter__ = AsyncMock(return_value=MagicMock()) - mock_cm.__aexit__ = AsyncMock(return_value=False) - mock_session_cls.return_value = mock_cm - - with pytest.raises(asyncio.CancelledError): - await handler._run_chat_turn(request_id="req-cancel", payload=payload) - - sent_types = [f["type"] for f in ws.sent] - assert "done" in sent_types, "done frame must be sent on CancelledError" - - -# --------------------------------------------------------------------------- -# Accepted ack: emitted after task registration and before first status frame -# --------------------------------------------------------------------------- - - -@pytest.mark.asyncio -async def test_run_chat_turn_emits_accepted_before_status() -> None: - handler, ws = make_handler() - - from app.schemas.chat import ChatRequest - - payload = ChatRequest(message="hello", thread_id=None, graph_id=None, metadata={}) - - class FakeGraph: - async def astream_events(self, *_args, **_kwargs): - if False: - yield None - - class FakeGraphService: - def __init__(self, _db): - pass - - async def create_default_deep_agents_graph(self, **_kwargs): - return FakeGraph() - - async def fake_safe_get_state(*_args, **_kwargs): - return MagicMock(tasks=[], values={}) - - with ( - patch("app.websocket.chat_ws_handler.AsyncSessionLocal") as mock_session_cls, - patch("app.websocket.chat_ws_handler.get_or_create_conversation", AsyncMock(return_value=("thread-ack", True))), - patch("app.websocket.chat_ws_handler.save_user_message", AsyncMock()), - patch( - "app.websocket.chat_ws_handler.get_user_config", - AsyncMock( - return_value=( - {"configurable": {"thread_id": "thread-ack"}}, - {}, - { - "llm_model": "gpt-test", - "api_key": "test", - "base_url": "http://example.invalid", - "max_tokens": 1024, - }, - ) - ), - ), - patch("app.websocket.chat_ws_handler.GraphService", FakeGraphService), - patch("app.websocket.chat_ws_handler.safe_get_state", side_effect=fake_safe_get_state), - patch("app.websocket.chat_ws_handler.task_manager") as mock_tm, - patch.object(handler, "_finalize_task", AsyncMock()), - patch("app.websocket.chat_ws_handler.ArtifactCollector") as mock_artifacts, - ): - mock_cm = AsyncMock() - mock_cm.__aenter__ = AsyncMock(return_value=MagicMock()) - mock_cm.__aexit__ = AsyncMock(return_value=False) - mock_session_cls.return_value = mock_cm - mock_tm.register_task = AsyncMock() - mock_tm.is_stopped = AsyncMock(return_value=False) - mock_artifacts.return_value.ensure_run_dir = MagicMock() - - await handler._run_chat_turn(request_id="req-accepted", payload=payload) - - sent_types = [f["type"] for f in ws.sent] - assert "accepted" in sent_types, "accepted ack must be emitted once the turn is registered" - assert "status" in sent_types, "connected status must still be emitted" - assert sent_types.index("accepted") < sent_types.index("status") - - accepted_frame = ws.frames_of_type("accepted")[0] - assert accepted_frame["request_id"] == "req-accepted" - assert accepted_frame["thread_id"] == "thread-ack" diff --git 
a/backend/tests/test_api/test_copilot_event_mirroring.py b/backend/tests/test_api/test_copilot_event_mirroring.py deleted file mode 100644 index 6b1d04764..000000000 --- a/backend/tests/test_api/test_copilot_event_mirroring.py +++ /dev/null @@ -1,175 +0,0 @@ -"""Tests for copilot event mirroring logic. - -Verifies that copilot-specific event types are correctly translated into -payloads that the copilot reducer can process. This tests the translation -logic used by _mirror_run_stream_event in chat_ws_handler.py. -""" - -from __future__ import annotations - -from typing import Any - -from app.services.run_reducers.copilot import apply_copilot_event, make_initial_projection - - -def _mirror_event_to_payload( - event_type: str, - data: dict[str, Any], - assistant_message_id: str = "msg-1", -) -> tuple[str, dict[str, Any] | None]: - """ - Reproduce the event-type → payload translation from _mirror_run_stream_event. - - Returns (stored_event_type, payload) — None payload means event is dropped. - """ - payload: dict[str, Any] | None = None - - if event_type == "status": - stage = data.get("stage") - if stage is not None: - payload = {"stage": stage, "message": data.get("message", "")} - else: - message = str(data.get("status") or "") - payload = {"message": message, "status": message} - elif event_type == "content" and assistant_message_id: - delta = data.get("delta") if "delta" in data else data.get("content") - if delta: - payload = {"message_id": assistant_message_id, "delta": str(delta)} - elif event_type in ("thought_step", "tool_call", "tool_result", "result"): - payload = data - elif event_type == "error": - payload = {"message": data.get("message"), "code": data.get("code")} - elif event_type == "done": - payload = {} - - stored_type = "content_delta" if event_type == "content" else event_type - return stored_type, payload - - -# --- Mirror translation tests --- - - -def test_copilot_status_with_stage(): - """Copilot status events preserve the 'stage' field.""" - event_type, payload = _mirror_event_to_payload( - "status", - {"type": "status", "stage": "thinking", "message": "Analyzing..."}, - ) - assert event_type == "status" - assert payload == {"stage": "thinking", "message": "Analyzing..."} - - -def test_chat_status_without_stage(): - """Chat status events (no stage) still work.""" - event_type, payload = _mirror_event_to_payload( - "status", - {"status": "processing"}, - ) - assert event_type == "status" - assert payload == {"message": "processing", "status": "processing"} - - -def test_copilot_content_uses_content_key(): - """Copilot content events use 'content' key instead of 'delta'.""" - event_type, payload = _mirror_event_to_payload( - "content", - {"type": "content", "content": "Hello"}, - ) - assert event_type == "content_delta" - assert payload is not None - assert payload["delta"] == "Hello" - - -def test_chat_content_uses_delta_key(): - """Chat content events with 'delta' still work.""" - event_type, payload = _mirror_event_to_payload( - "content", - {"delta": "chunk"}, - ) - assert event_type == "content_delta" - assert payload is not None - assert payload["delta"] == "chunk" - - -def test_thought_step_passthrough(): - data = {"type": "thought_step", "step": "Considering graph"} - event_type, payload = _mirror_event_to_payload("thought_step", data) - assert event_type == "thought_step" - assert payload is data # pass-through, not copied - - -def test_tool_call_passthrough(): - data = {"type": "tool_call", "tool": "search", "input": {"q": "test"}} - event_type, payload = 
_mirror_event_to_payload("tool_call", data) - assert event_type == "tool_call" - assert payload["tool"] == "search" - assert payload["input"] == {"q": "test"} - - -def test_tool_result_passthrough(): - action = {"type": "add_node", "payload": {"name": "n1"}} - data = {"type": "tool_result", "action": action} - event_type, payload = _mirror_event_to_payload("tool_result", data) - assert event_type == "tool_result" - assert payload["action"] == action - - -def test_result_passthrough(): - actions = [{"type": "add_node", "payload": {}}] - data = {"type": "result", "message": "Done!", "actions": actions} - event_type, payload = _mirror_event_to_payload("result", data) - assert event_type == "result" - assert payload["message"] == "Done!" - assert payload["actions"] == actions - - -def test_unknown_event_type_dropped(): - """Unknown event types produce None payload and are dropped.""" - _, payload = _mirror_event_to_payload("unknown_copilot_event", {"foo": "bar"}) - assert payload is None - - -# --- End-to-end: mirror → reducer integration --- - - -def test_copilot_event_flow_mirror_to_reducer(): - """Full pipeline: copilot events → mirror translation → reducer produces correct projection.""" - events = [ - ("status", {"type": "status", "stage": "thinking", "message": "Thinking..."}), - ("content", {"type": "content", "content": "Here is "}), - ("content", {"type": "content", "content": "the answer."}), - ("thought_step", {"type": "thought_step", "step": "Analyzed requirements"}), - ("tool_call", {"type": "tool_call", "tool": "add_node", "input": {"name": "n1"}}), - ("tool_result", {"type": "tool_result", "action": {"type": "add_node", "payload": {"name": "n1"}}}), - ( - "result", - {"type": "result", "message": "Built your pipeline.", "actions": [{"type": "add_node", "payload": {}}]}, - ), - ("done", {}), - ] - - projection = make_initial_projection({"graph_id": "g1", "mode": "deepagents"}, "running") - - for raw_type, data in events: - stored_type, payload = _mirror_event_to_payload(raw_type, data) - if payload is None: - continue - projection = apply_copilot_event( - projection, - event_type=stored_type, - payload=payload, - status="running" if raw_type != "done" else "completed", - ) - - assert projection["stage"] == "thinking" - assert projection["content"] == "Here is the answer." - assert len(projection["thought_steps"]) == 1 - assert projection["thought_steps"][0] == "Analyzed requirements" - assert len(projection["tool_calls"]) == 1 - assert projection["tool_calls"][0]["tool"] == "add_node" - assert len(projection["tool_results"]) == 1 - assert projection["result_message"] == "Built your pipeline." 
- assert len(projection["result_actions"]) == 1 - assert projection["status"] == "completed" - assert projection["graph_id"] == "g1" - assert projection["mode"] == "deepagents" diff --git a/backend/tests/test_api/test_copilot_history_from_runs.py b/backend/tests/test_api/test_copilot_history_from_runs.py deleted file mode 100644 index 73c51f542..000000000 --- a/backend/tests/test_api/test_copilot_history_from_runs.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Tests for copilot history built from agent_run snapshots.""" - -import uuid -from datetime import datetime, timezone -from unittest.mock import MagicMock - - -def _make_run(*, graph_id: str, title: str, status: str = "completed") -> MagicMock: - run = MagicMock() - run.id = uuid.uuid4() - run.graph_id = uuid.UUID(graph_id) - run.title = title - run.status = status - run.created_at = datetime(2026, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - run.updated_at = datetime(2026, 1, 1, 0, 1, 0, tzinfo=timezone.utc) - return run - - -def _make_snapshot( - *, - result_message: str, - result_actions: list | None = None, - thought_steps: list | None = None, - tool_calls: list | None = None, -) -> MagicMock: - snap = MagicMock() - snap.projection = { - "result_message": result_message, - "result_actions": result_actions or [], - "content": "", - "thought_steps": thought_steps or [], - "tool_calls": tool_calls or [], - } - return snap - - -def test_build_history_messages_from_runs(): - """History messages are assembled from run title + snapshot projection.""" - graph_id = str(uuid.uuid4()) - # DB returns newest-first; reversed() produces oldest-first for the response - runs = [ - _make_run(graph_id=graph_id, title="Build RAG"), # newest - _make_run(graph_id=graph_id, title="Hello"), # oldest - ] - snapshots = { - runs[0].id: _make_snapshot( - result_message="Here is your RAG pipeline.", result_actions=[{"type": "add_node", "payload": {}}] - ), - runs[1].id: _make_snapshot(result_message="Hi! How can I help?"), - } - - messages = [] - for run in reversed(list(runs)): # oldest first - snap = snapshots.get(run.id) - if not snap or not snap.projection: - continue - p = snap.projection - messages.append( - { - "role": "user", - "content": run.title or "", - "created_at": run.created_at.isoformat(), - } - ) - messages.append( - { - "role": "assistant", - "content": p.get("result_message") or p.get("content", ""), - "created_at": run.updated_at.isoformat(), - "actions": p.get("result_actions", []), - "thought_steps": p.get("thought_steps", []), - "tool_calls": p.get("tool_calls", []), - } - ) - - assert len(messages) == 4 - assert messages[0]["role"] == "user" - assert messages[0]["content"] == "Hello" - assert messages[1]["role"] == "assistant" - assert messages[1]["content"] == "Hi! How can I help?" 
- assert messages[2]["content"] == "Build RAG" - assert messages[3]["actions"] == [{"type": "add_node", "payload": {}}] - - -def test_build_history_filters_by_graph_id(): - """Only runs matching the requested graph_id are included.""" - target_gid = str(uuid.uuid4()) - other_gid = str(uuid.uuid4()) - runs = [ - _make_run(graph_id=target_gid, title="Match"), - _make_run(graph_id=other_gid, title="NoMatch"), - ] - filtered = [r for r in runs if str(r.graph_id) == target_gid] - assert len(filtered) == 1 - assert filtered[0].title == "Match" - - -def test_build_history_skips_runs_without_snapshot(): - """Runs that have no snapshot are silently skipped.""" - graph_id = str(uuid.uuid4()) - runs = [_make_run(graph_id=graph_id, title="NoSnap")] - - messages = [] - for run in reversed(list(runs)): - snapshot = None # simulate missing snapshot - if not snapshot: - continue - # would append messages here, but we skip - assert len(messages) == 0 diff --git a/backend/tests/test_api/test_runs_api.py b/backend/tests/test_api/test_runs_api.py deleted file mode 100644 index 6d846190c..000000000 --- a/backend/tests/test_api/test_runs_api.py +++ /dev/null @@ -1,153 +0,0 @@ -from __future__ import annotations - -import uuid -from datetime import UTC, datetime -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from fastapi import FastAPI -from fastapi.testclient import TestClient - -from app.api.v1.runs import router -from app.core.database import get_db -from app.models.agent_run import AgentRunStatus -from app.models.auth import AuthUser as User - - -async def mock_get_current_user(): - user = MagicMock(spec=User) - user.id = "user-123" - return user - - -async def mock_get_db(): - yield AsyncMock() - - -@pytest.fixture -def client(): - test_app = FastAPI() - test_app.include_router(router) - - from app.common.dependencies import get_current_user - - test_app.dependency_overrides[get_current_user] = mock_get_current_user - test_app.dependency_overrides[get_db] = mock_get_db - - with TestClient(test_app) as c: - yield c - - -def make_run(*, agent_name: str = "skill_creator", run_type: str = "generic_agent") -> MagicMock: - now = datetime.now(UTC) - run = MagicMock() - run.id = uuid.uuid4() - run.status = AgentRunStatus.RUNNING - run.run_type = run_type - run.agent_name = agent_name - run.source = "run_center" - run.thread_id = "thread-123" - run.graph_id = uuid.uuid4() - run.title = "Build a skill" - run.started_at = now - run.finished_at = None - run.last_seq = 5 - run.error_code = None - run.error_message = None - run.last_heartbeat_at = now - run.updated_at = now - return run - - -@patch("app.api.v1.runs.RunService") -def test_list_runs_forwards_agent_filters_and_returns_agent_fields(mock_service_cls, client: TestClient) -> None: - mock_service = mock_service_cls.return_value - mock_service.list_recent_runs = AsyncMock(return_value=[make_run()]) - - response = client.get( - "/v1/runs", - params={ - "run_type": "generic_agent", - "agent_name": "skill_creator", - "status": "running", - "search": "skill", - "limit": 25, - }, - ) - - assert response.status_code == 200 - body = response.json() - assert body["data"]["items"][0]["agent_name"] == "skill_creator" - assert body["data"]["items"][0]["agent_display_name"] == "Skill Creator" - mock_service.list_recent_runs.assert_awaited_once_with( - user_id="user-123", - run_type="generic_agent", - agent_name="skill_creator", - status="running", - search="skill", - limit=25, - ) - - -@patch("app.api.v1.runs.RunService") -def 
test_list_agents_returns_registered_agent_definitions(mock_service_cls, client: TestClient) -> None: - mock_service = mock_service_cls.return_value - mock_service.list_agents = AsyncMock( - return_value=[ - MagicMock(agent_name="skill_creator", display_name="Skill Creator"), - ] - ) - - response = client.get("/v1/runs/agents") - - assert response.status_code == 200 - assert response.json()["data"]["items"] == [ - {"agent_name": "skill_creator", "display_name": "Skill Creator"}, - ] - - -@patch("app.api.v1.runs.RunService") -def test_create_run_uses_generic_agent_endpoint(mock_service_cls, client: TestClient) -> None: - mock_service = mock_service_cls.return_value - created_run = make_run() - mock_service.create_run = AsyncMock(return_value=created_run) - - response = client.post( - "/v1/runs", - json={ - "agent_name": "skill_creator", - "graph_id": str(created_run.graph_id), - "message": "Build a reusable skill", - "thread_id": "thread-123", - "input": {"edit_skill_id": "skill-1"}, - }, - ) - - assert response.status_code == 200 - assert response.json()["data"]["run_id"] == str(created_run.id) - mock_service.create_run.assert_awaited_once() - - -@patch("app.api.v1.runs.RunService") -def test_find_active_run_uses_generic_agent_name_filter(mock_service_cls, client: TestClient) -> None: - active_run = make_run() - mock_service = mock_service_cls.return_value - mock_service.find_latest_active_run = AsyncMock(return_value=active_run) - - response = client.get( - "/v1/runs/active", - params={ - "agent_name": "skill_creator", - "graph_id": str(active_run.graph_id), - "thread_id": "thread-123", - }, - ) - - assert response.status_code == 200 - assert response.json()["data"]["run_id"] == str(active_run.id) - mock_service.find_latest_active_run.assert_awaited_once_with( - user_id="user-123", - agent_name="skill_creator", - graph_id=active_run.graph_id, - thread_id="thread-123", - ) diff --git a/backend/tests/test_api/test_skill_creator.py b/backend/tests/test_api/test_skill_creator.py deleted file mode 100644 index bf032300e..000000000 --- a/backend/tests/test_api/test_skill_creator.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Integration smoke tests for the Skill Creator feature.""" - -import json -import tempfile -from pathlib import Path - -import pytest - -from app.core.tools.builtin.preview_skill import preview_skill_in_sandbox -from app.schemas.chat import ChatRequest - - -class TestSkillCreatorIntegration: - """Verify key components of the Skill Creator feature work together.""" - - def test_chat_request_accepts_skill_creator_metadata(self): - req = ChatRequest(message="Create a network scan skill", metadata={"mode": "skill_creator"}) - assert req.metadata["mode"] == "skill_creator" - - def test_chat_request_skill_creator_with_edit_metadata(self): - req = ChatRequest( - message="Update this skill", - metadata={"mode": "skill_creator", "edit_skill_id": "abc-123"}, - ) - assert req.metadata["mode"] == "skill_creator" - assert req.metadata["edit_skill_id"] == "abc-123" - - def test_preview_skill_end_to_end(self): - """Simulate: agent creates skill files in sandbox -> preview_skill reads them.""" - with tempfile.TemporaryDirectory() as sandbox_root: - skill_dir = Path(sandbox_root) / "thread-123" / "skills" / "test-scan" - skill_dir.mkdir(parents=True) - - # Simulate agent writing SKILL.md - (skill_dir / "SKILL.md").write_text( - "---\nname: test-scan\ndescription: A network scanning skill\n---\n" - "# Test Scan Skill\n\nThis skill performs network scanning." 
- ) - - # Simulate agent writing a script - scripts_dir = skill_dir / "scripts" - scripts_dir.mkdir() - (scripts_dir / "scan.py").write_text("import subprocess\ndef run_scan(target): pass") - - # Call preview_skill (same as agent would) - result_json = preview_skill_in_sandbox("test-scan", sandbox_root) - result = json.loads(result_json) - - assert result["skill_name"] == "test-scan" - assert result["validation"]["valid"] is True - assert len(result["files"]) == 2 - - # Verify files match what would be sent to POST /v1/skills - paths = {f["path"] for f in result["files"]} - assert "SKILL.md" in paths - assert "scripts/scan.py" in paths - - def test_graph_service_has_skill_creator_method(self): - """Verify GraphService has the create_skill_creator_graph method.""" - try: - from app.services.graph_service import GraphService - except ImportError as exc: - pytest.skip(f"GraphService import requires optional deps: {exc}") - - assert hasattr(GraphService, "create_skill_creator_graph") - assert callable(getattr(GraphService, "create_skill_creator_graph")) diff --git a/backend/tests/test_common/__init__.py b/backend/tests/test_common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/test_common/test_pagination.py b/backend/tests/test_common/test_pagination.py deleted file mode 100644 index ae17358ce..000000000 --- a/backend/tests/test_common/test_pagination.py +++ /dev/null @@ -1,18 +0,0 @@ -import pytest -from pydantic import ValidationError - -from app.common.pagination import ConversationMessagesPaginationParams, PaginationParams - - -def test_conversation_messages_pagination_allows_page_size_200(): - params = ConversationMessagesPaginationParams(page=1, page_size=200) - - assert params.page == 1 - assert params.page_size == 200 - assert params.offset == 0 - assert params.limit == 200 - - -def test_common_pagination_still_rejects_page_size_200(): - with pytest.raises(ValidationError): - PaginationParams(page=1, page_size=200) diff --git a/backend/tests/test_common/test_permissions.py b/backend/tests/test_common/test_permissions.py deleted file mode 100644 index bde912f8c..000000000 --- a/backend/tests/test_common/test_permissions.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Tests for common permissions checking.""" - -from app.common.permissions import _scope_satisfies, check_token_permission - - -def test_global_token_satisfies(): - assert check_token_permission(["skills:read"], "skills:read", "skill", "123", None, None) is True - assert check_token_permission(["skills:admin"], "skills:execute", "skill", "123", None, None) is True - - -def test_scope_hierarchy(): - assert check_token_permission(["skills:execute"], "skills:read", "skill", "1", None, None) is True - assert check_token_permission(["skills:read"], "skills:execute", "skill", "1", None, None) is False - - -def test_resource_binding(): - # Matches bound resource - assert check_token_permission(["skills:execute"], "skills:execute", "skill", "1", "skill", "1") is True - # Fails different resource ID - assert check_token_permission(["skills:execute"], "skills:execute", "skill", "2", "skill", "1") is False - # Fails different resource type binding - assert check_token_permission(["graphs:execute"], "graphs:execute", "graph", "1", "skill", "1") is False - - -def test_scope_satisfies_same_scope(): - assert _scope_satisfies("skills:read", "skills:read") is True - assert _scope_satisfies("graphs:execute", "graphs:execute") is True - - -def test_scope_satisfies_higher_covers_lower(): - """admin > publish > 
execute > write > read for skills.""" - assert _scope_satisfies("skills:admin", "skills:read") is True - assert _scope_satisfies("skills:admin", "skills:publish") is True - assert _scope_satisfies("skills:publish", "skills:execute") is True - assert _scope_satisfies("skills:write", "skills:read") is True - - -def test_scope_satisfies_lower_does_not_cover_higher(): - assert _scope_satisfies("skills:read", "skills:write") is False - assert _scope_satisfies("skills:execute", "skills:publish") is False - assert _scope_satisfies("skills:publish", "skills:admin") is False - - -def test_scope_satisfies_cross_resource_fails(): - """skills:admin should NOT satisfy graphs:read.""" - assert _scope_satisfies("skills:admin", "graphs:read") is False - assert _scope_satisfies("graphs:execute", "tools:execute") is False - - -def test_scope_satisfies_malformed_scope(): - assert _scope_satisfies("invalid", "skills:read") is False - assert _scope_satisfies("skills:read", "invalid") is False - assert _scope_satisfies("", "") is False - - -def test_empty_scopes_list(): - assert check_token_permission([], "skills:read", "skill", "1", None, None) is False - - -def test_multiple_scopes_any_match(): - """If any scope in the list satisfies, permission is granted.""" - assert check_token_permission(["tools:read", "skills:execute"], "skills:read", "skill", "1", None, None) is True - - -def test_graphs_hierarchy(): - assert _scope_satisfies("graphs:execute", "graphs:read") is True - assert _scope_satisfies("graphs:read", "graphs:execute") is False - - -def test_tools_hierarchy(): - assert _scope_satisfies("tools:execute", "tools:read") is True - assert _scope_satisfies("tools:read", "tools:execute") is False - - -def test_resource_binding_with_uuid_strings(): - """resource_id comparison uses str() so UUIDs and strings should match.""" - uid = "550e8400-e29b-41d4-a716-446655440000" - assert check_token_permission(["skills:execute"], "skills:execute", "skill", uid, "skill", uid) is True diff --git a/backend/tests/test_model_providers_api.py b/backend/tests/test_model_providers_api.py deleted file mode 100644 index 74e4e1644..000000000 --- a/backend/tests/test_model_providers_api.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -Tests for model_providers API — PATCH defaults, auth, 404 handling. 
-""" - -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from fastapi import FastAPI -from fastapi.testclient import TestClient - -from app.api.v1.model_providers import router -from app.common.dependencies import get_current_user -from app.core.database import get_db -from app.models.auth import AuthUser as User - -# --------------------------------------------------------------------------- -# Fixtures -# --------------------------------------------------------------------------- - - -async def mock_get_current_user(): - user = MagicMock(spec=User) - user.id = "user-123" - return user - - -async def mock_get_db(): - yield AsyncMock() - - -@pytest.fixture -def client(): - test_app = FastAPI() - test_app.include_router(router) - test_app.dependency_overrides[get_current_user] = mock_get_current_user - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app) as c: - yield c - - -def _make_provider(name: str = "anthropic") -> dict: - return { - "provider_name": name, - "display_name": "Anthropic", - "supported_model_types": ["chat"], - "credential_schema": {}, - "config_schemas": {}, - "model_count": 3, - "default_parameters": {"temperature": 0.7}, - "is_template": False, - "provider_type": "system", - "template_name": None, - "is_enabled": True, - } - - -# --------------------------------------------------------------------------- -# GET / — list providers -# --------------------------------------------------------------------------- - - -@patch("app.api.v1.model_providers.ModelProviderService") -def test_list_providers_returns_200(mock_cls, client: TestClient): - mock_svc = mock_cls.return_value - mock_svc.get_all_providers = AsyncMock(return_value=[_make_provider()]) - - resp = client.get("/v1/model-providers") - - assert resp.status_code == 200 - data = resp.json()["data"] - assert len(data) == 1 - assert data[0]["provider_name"] == "anthropic" - assert data[0]["default_parameters"] == {"temperature": 0.7} - - -# --------------------------------------------------------------------------- -# GET /{name} — get single provider -# --------------------------------------------------------------------------- - - -@patch("app.api.v1.model_providers.ModelProviderService") -def test_get_provider_returns_200(mock_cls, client: TestClient): - mock_svc = mock_cls.return_value - mock_svc.get_provider = AsyncMock(return_value=_make_provider()) - - resp = client.get("/v1/model-providers/anthropic") - - assert resp.status_code == 200 - assert resp.json()["data"]["provider_name"] == "anthropic" - - -@patch("app.api.v1.model_providers.ModelProviderService") -def test_get_provider_not_found_returns_404(mock_cls, client: TestClient): - mock_svc = mock_cls.return_value - mock_svc.get_provider = AsyncMock(return_value=None) - - resp = client.get("/v1/model-providers/nonexistent") - - assert resp.status_code == 404 - - -# --------------------------------------------------------------------------- -# PATCH /{name}/defaults — update default parameters -# --------------------------------------------------------------------------- - - -@patch("app.api.v1.model_providers.ModelProviderService") -def test_patch_defaults_returns_updated_provider(mock_cls, client: TestClient): - updated = _make_provider() - updated["default_parameters"] = {"temperature": 0.9, "max_tokens": 4096} - - mock_svc = mock_cls.return_value - mock_svc.update_provider_defaults = AsyncMock(return_value=updated) - - resp = client.patch( - "/v1/model-providers/anthropic/defaults", - 
json={"default_parameters": {"temperature": 0.9, "max_tokens": 4096}}, - ) - - assert resp.status_code == 200 - data = resp.json()["data"] - assert data["default_parameters"]["temperature"] == 0.9 - assert data["default_parameters"]["max_tokens"] == 4096 - mock_svc.update_provider_defaults.assert_called_once_with("anthropic", {"temperature": 0.9, "max_tokens": 4096}) - - -@patch("app.api.v1.model_providers.ModelProviderService") -def test_patch_defaults_provider_not_found_returns_404(mock_cls, client: TestClient): - from app.common.exceptions import NotFoundException - - mock_svc = mock_cls.return_value - mock_svc.update_provider_defaults = AsyncMock(side_effect=NotFoundException("Provider not found: nonexistent")) - - resp = client.patch( - "/v1/model-providers/nonexistent/defaults", - json={"default_parameters": {"temperature": 0.5}}, - ) - - assert resp.status_code == 404 - - -# --------------------------------------------------------------------------- -# Auth — no token returns 401 -# --------------------------------------------------------------------------- - - -def test_list_providers_without_auth_returns_401(): - test_app = FastAPI() - test_app.include_router(router) - # No dependency override — real auth dependency will reject missing token - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app, raise_server_exceptions=False) as c: - resp = c.get("/v1/model-providers") - assert resp.status_code == 401 - - -def test_patch_defaults_without_auth_returns_401(): - test_app = FastAPI() - test_app.include_router(router) - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app, raise_server_exceptions=False) as c: - resp = c.patch( - "/v1/model-providers/anthropic/defaults", - json={"default_parameters": {}}, - ) - assert resp.status_code == 401 diff --git a/backend/tests/test_models_api.py b/backend/tests/test_models_api.py deleted file mode 100644 index 85e950e56..000000000 --- a/backend/tests/test_models_api.py +++ /dev/null @@ -1,180 +0,0 @@ -""" -Tests for models API — PATCH instances/{id}, GET overview, unavailable_reason. 
-""" - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from fastapi import FastAPI -from fastapi.testclient import TestClient - -from app.api.v1.models import router -from app.common.dependencies import get_current_user -from app.core.database import get_db -from app.models.auth import AuthUser as User - -# --------------------------------------------------------------------------- -# Fixtures -# --------------------------------------------------------------------------- - - -async def mock_get_current_user(): - user = MagicMock(spec=User) - user.id = "user-123" - return user - - -async def mock_get_db(): - yield AsyncMock() - - -@pytest.fixture -def client(): - test_app = FastAPI() - test_app.include_router(router) - test_app.dependency_overrides[get_current_user] = mock_get_current_user - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app) as c: - yield c - - -def _make_instance(instance_id: str | None = None) -> dict: - return { - "id": instance_id or str(uuid.uuid4()), - "provider_name": "anthropic", - "model_name": "claude-3-5-sonnet", - "model_type": "chat", - "model_parameters": {"temperature": 0.7}, - } - - -def _make_overview() -> dict: - return { - "total_providers": 3, - "healthy_providers": 2, - "unhealthy_providers": 0, - "unconfigured_providers": 1, - "total_models": 10, - "available_models": 8, - "recent_credential_failure": None, - } - - -# --------------------------------------------------------------------------- -# GET /overview -# --------------------------------------------------------------------------- - - -@patch("app.api.v1.models.ModelService") -def test_get_overview_returns_200(mock_cls, client: TestClient): - mock_svc = mock_cls.return_value - mock_svc.get_overview = AsyncMock(return_value=_make_overview()) - - resp = client.get("/v1/models/overview") - - assert resp.status_code == 200 - data = resp.json()["data"] - assert data["total_providers"] == 3 - assert data["healthy_providers"] == 2 - - -# --------------------------------------------------------------------------- -# GET / — list models with unavailable_reason -# --------------------------------------------------------------------------- - - -@patch("app.api.v1.models.ModelService") -def test_list_models_includes_unavailable_reason(mock_cls, client: TestClient): - models = [ - { - "provider_name": "anthropic", - "provider_display_name": "Anthropic", - "name": "claude-3-5-sonnet", - "display_name": "Claude 3.5 Sonnet", - "description": "", - "is_available": False, - "unavailable_reason": "no_credentials", - } - ] - mock_svc = mock_cls.return_value - mock_svc.get_available_models = AsyncMock(return_value=models) - - resp = client.get("/v1/models?model_type=chat") - - assert resp.status_code == 200 - item = resp.json()["data"][0] - assert item["is_available"] is False - assert item["unavailable_reason"] == "no_credentials" - - -# --------------------------------------------------------------------------- -# PATCH /instances/{id} — update model instance -# --------------------------------------------------------------------------- - - -@patch("app.api.v1.models.ModelService") -def test_patch_instance_updates_parameters(mock_cls, client: TestClient): - instance_id = str(uuid.uuid4()) - updated = _make_instance(instance_id) - updated["model_parameters"] = {"temperature": 0.9} - - mock_svc = mock_cls.return_value - mock_svc.update_model_instance = AsyncMock(return_value=updated) - - resp = client.patch( - f"/v1/models/instances/{instance_id}", - 
json={"model_parameters": {"temperature": 0.9}}, - ) - - assert resp.status_code == 200 - data = resp.json()["data"] - assert data["model_parameters"]["temperature"] == 0.9 - mock_svc.update_model_instance.assert_called_once() - - -@patch("app.api.v1.models.ModelService") -def test_patch_instance_not_found_returns_404(mock_cls, client: TestClient): - from app.common.exceptions import NotFoundException - - mock_svc = mock_cls.return_value - mock_svc.update_model_instance = AsyncMock(side_effect=NotFoundException("Model instance not found")) - - resp = client.patch( - f"/v1/models/instances/{uuid.uuid4()}", - json={"model_parameters": {}}, - ) - - assert resp.status_code == 404 - - -# --------------------------------------------------------------------------- -# Auth — no token returns 401 -# --------------------------------------------------------------------------- - - -def test_list_models_without_auth_returns_401(): - test_app = FastAPI() - test_app.include_router(router) - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app, raise_server_exceptions=False) as c: - resp = c.get("/v1/models") - assert resp.status_code == 401 - - -def test_get_overview_without_auth_returns_401(): - test_app = FastAPI() - test_app.include_router(router) - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app, raise_server_exceptions=False) as c: - resp = c.get("/v1/models/overview") - assert resp.status_code == 401 - - -def test_patch_instance_without_auth_returns_401(): - test_app = FastAPI() - test_app.include_router(router) - test_app.dependency_overrides[get_db] = mock_get_db - with TestClient(test_app, raise_server_exceptions=False) as c: - resp = c.patch(f"/v1/models/instances/{uuid.uuid4()}", json={}) - assert resp.status_code == 401 diff --git a/backend/tests/test_schemas/__init__.py b/backend/tests/test_schemas/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/test_schemas/test_chat.py b/backend/tests/test_schemas/test_chat.py deleted file mode 100644 index 1dd58784b..000000000 --- a/backend/tests/test_schemas/test_chat.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Unit tests for ChatRequest schema — generic chat-only fields.""" - -from __future__ import annotations - -import uuid - -import pytest -from pydantic import ValidationError - -from app.schemas.chat import ChatRequest - -# --------------------------------------------------------------------------- -# Baseline: existing fields still work -# --------------------------------------------------------------------------- - - -class TestChatRequestBaseline: - """Ensure the existing ChatRequest contract is unbroken.""" - - def test_minimal_request(self): - """Only the required `message` field is needed.""" - req = ChatRequest(message="hello") - assert req.message == "hello" - assert req.thread_id is None - assert req.graph_id is None - assert req.metadata == {} - - def test_all_existing_fields(self): - gid = uuid.uuid4() - req = ChatRequest( - message="hi", - thread_id="t-1", - graph_id=gid, - provider_name="openai", - model_name="gpt-4o", - metadata={"key": "value"}, - ) - assert req.thread_id == "t-1" - assert req.graph_id == gid - assert req.provider_name == "openai" - assert req.model_name == "gpt-4o" - assert req.metadata == {"key": "value"} - - -# --------------------------------------------------------------------------- -# Metadata handling -# --------------------------------------------------------------------------- - - -class TestChatRequestMetadata: - 
"""Ensure metadata remains flexible but scoped to generic chat context.""" - - def test_metadata_defaults_to_empty_dict(self): - req = ChatRequest(message="hello") - assert req.metadata == {} - - def test_metadata_is_copied(self): - meta = {"foo": "bar"} - req = ChatRequest(message="hello", metadata=meta) - assert req.metadata == {"foo": "bar"} - meta["foo"] = "baz" - assert req.metadata == {"foo": "bar"} - - def test_metadata_accepts_files_list(self): - files = [ - {"filename": "notes.md", "path": "/tmp/notes.md", "size": 12}, - {"filename": "plan.txt", "path": "/tmp/plan.txt", "size": 8}, - ] - req = ChatRequest(message="hi", metadata={"files": files}) - assert req.metadata["files"] == files - - -# --------------------------------------------------------------------------- -# Extra field rejection -# --------------------------------------------------------------------------- - - -class TestChatRequestExtraFields: - """ChatRequest should stay generic and forbid transport-only fields.""" - - def test_mode_field_is_rejected(self): - with pytest.raises(ValidationError): - ChatRequest(message="hello", mode="skill_creator") # type: ignore[arg-type] - - def test_edit_skill_id_field_is_rejected(self): - with pytest.raises(ValidationError): - ChatRequest(message="hello", edit_skill_id="skill-1") # type: ignore[arg-type] diff --git a/backend/tests/test_services/__init__.py b/backend/tests/test_services/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/test_services/test_agent_registry.py b/backend/tests/test_services/test_agent_registry.py deleted file mode 100644 index 38712c56d..000000000 --- a/backend/tests/test_services/test_agent_registry.py +++ /dev/null @@ -1,17 +0,0 @@ -from app.services.agent_registry import agent_registry - - -def test_agent_registry_exposes_skill_creator_definition() -> None: - definition = agent_registry.get("skill_creator") - - assert definition.agent_name == "skill_creator" - assert definition.display_name == "Skill Creator" - assert definition.run_type == "skill_creator" - assert callable(definition.reducer) - assert callable(definition.make_initial_projection) - - -def test_agent_registry_lists_registered_agents() -> None: - definitions = agent_registry.list_definitions() - - assert any(definition.agent_name == "skill_creator" for definition in definitions) diff --git a/backend/tests/test_services/test_chat_run_reducer.py b/backend/tests/test_services/test_chat_run_reducer.py deleted file mode 100644 index 47eb023be..000000000 --- a/backend/tests/test_services/test_chat_run_reducer.py +++ /dev/null @@ -1,276 +0,0 @@ -"""Tests for the Chat run reducer and agent registration.""" - -from app.services.agent_registry import agent_registry - - -def test_chat_definition_registered() -> None: - definition = agent_registry.get("chat") - assert definition.agent_name == "chat" - assert definition.display_name == "Chat" - assert definition.run_type == "chat_turn" - - -def test_chat_initial_projection() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection( - {"graph_id": "graph-1", "thread_id": "thread-1"}, - status="queued", - ) - assert projection["run_type"] == "chat_turn" - assert projection["status"] == "queued" - assert projection["graph_id"] == "graph-1" - assert projection["thread_id"] == "thread-1" - assert projection["user_message"] is None - assert projection["assistant_message"] is None - assert projection["file_tree"] == {} - assert projection["preview_data"] is None - assert 
projection["node_execution_log"] == [] - assert projection["interrupt"] is None - - -def test_chat_reducer_user_message_added() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="user_message_added", - payload={"message": {"id": "msg-user-1", "role": "user", "content": "Hello"}}, - status="running", - ) - - assert next_projection["user_message"] == { - "id": "msg-user-1", - "role": "user", - "content": "Hello", - } - - -def test_chat_reducer_assistant_message_started() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="assistant_message_started", - payload={"message": {"id": "msg-ai-1", "role": "assistant", "content": ""}}, - status="running", - ) - - assert next_projection["assistant_message"] == { - "id": "msg-ai-1", - "role": "assistant", - "content": "", - } - - -def test_chat_reducer_content_delta() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - projection = definition.reducer( - projection, - event_type="assistant_message_started", - payload={"message": {"id": "msg-ai-1", "role": "assistant", "content": ""}}, - status="running", - ) - projection = definition.reducer( - projection, - event_type="content_delta", - payload={"message_id": "msg-ai-1", "delta": "Hello"}, - status="running", - ) - next_projection = definition.reducer( - projection, - event_type="content_delta", - payload={"message_id": "msg-ai-1", "delta": " world"}, - status="running", - ) - - assert next_projection["assistant_message"]["content"] == "Hello world" - - -def test_chat_reducer_tool_start() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - projection = definition.reducer( - projection, - event_type="assistant_message_started", - payload={"message": {"id": "msg-ai-1", "role": "assistant", "content": ""}}, - status="running", - ) - next_projection = definition.reducer( - projection, - event_type="tool_start", - payload={ - "message_id": "msg-ai-1", - "tool": {"id": "tool-1", "name": "some_tool", "status": "running"}, - }, - status="running", - ) - - assert next_projection["assistant_message"]["tool_calls"] == [ - {"id": "tool-1", "name": "some_tool", "status": "running"} - ] - - -def test_chat_reducer_tool_end_updates_tool_and_captures_preview() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - projection = definition.reducer( - projection, - event_type="assistant_message_started", - payload={"message": {"id": "msg-ai-1", "role": "assistant", "content": ""}}, - status="running", - ) - projection = definition.reducer( - projection, - event_type="tool_start", - payload={ - "message_id": "msg-ai-1", - "tool": {"id": "tool-1", "name": "preview_skill", "status": "running"}, - }, - status="running", - ) - next_projection = definition.reducer( - projection, - event_type="tool_end", - payload={ - "message_id": "msg-ai-1", - "tool_id": "tool-1", - "tool_name": "preview_skill", - "tool_output": {"name": "my-skill"}, - }, - status="running", - ) - - tool = next_projection["assistant_message"]["tool_calls"][0] - assert tool["status"] == "completed" - assert tool["result"] == {"name": "my-skill"} - assert 
next_projection["preview_data"] == {"name": "my-skill"} - - -def test_chat_reducer_file_event_create_and_delete() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - projection = definition.reducer( - projection, - event_type="file_event", - payload={"path": "/foo/bar.py", "action": "create", "size": 42, "timestamp": 1000}, - status="running", - ) - assert "/foo/bar.py" in projection["file_tree"] - assert projection["file_tree"]["/foo/bar.py"]["action"] == "create" - - next_projection = definition.reducer( - projection, - event_type="file_event", - payload={"path": "/foo/bar.py", "action": "delete"}, - status="running", - ) - assert "/foo/bar.py" not in next_projection["file_tree"] - - -def test_chat_reducer_node_start_and_end() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - projection = definition.reducer( - projection, - event_type="node_start", - payload={"node_id": "node-1", "node_name": "my_node", "start_time": 100}, - status="running", - ) - assert len(projection["node_execution_log"]) == 1 - assert projection["node_execution_log"][0]["node_id"] == "node-1" - assert projection["node_execution_log"][0]["status"] == "running" - - next_projection = definition.reducer( - projection, - event_type="node_end", - payload={"node_id": "node-1", "node_name": "my_node", "end_time": 200}, - status="running", - ) - assert len(next_projection["node_execution_log"]) == 1 - assert next_projection["node_execution_log"][0]["status"] == "completed" - assert next_projection["node_execution_log"][0]["end_time"] == 200 - - -def test_chat_reducer_interrupt() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="interrupt", - payload={"interrupt": {"type": "human_approval", "message": "Approve?"}}, - status="interrupted", - ) - - assert next_projection["interrupt"] == {"type": "human_approval", "message": "Approve?"} - assert next_projection["status"] == "interrupted" - - -def test_chat_reducer_error() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="error", - payload={"message": "Something went wrong"}, - status="failed", - ) - - assert next_projection["meta"]["error"] == "Something went wrong" - assert next_projection["status"] == "failed" - - -def test_chat_reducer_done() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="done", - payload={}, - status="completed", - ) - - assert next_projection["meta"]["completed"] is True - assert next_projection["status"] == "completed" - - -def test_chat_reducer_status_message() -> None: - definition = agent_registry.get("chat") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="status", - payload={"message": "Processing your request..."}, - status="running", - ) - - assert next_projection["meta"]["status_message"] == "Processing your request..." 
- - -def test_chat_reducer_run_initialized() -> None: - definition = agent_registry.get("chat") - - next_projection = definition.reducer( - None, - event_type="run_initialized", - payload={"graph_id": "graph-99", "thread_id": "thread-77"}, - status="queued", - ) - - assert next_projection["run_type"] == "chat_turn" - assert next_projection["status"] == "queued" - assert next_projection["graph_id"] == "graph-99" - assert next_projection["thread_id"] == "thread-77" - assert next_projection["user_message"] is None - assert next_projection["assistant_message"] is None diff --git a/backend/tests/test_services/test_copilot_run_reducer.py b/backend/tests/test_services/test_copilot_run_reducer.py deleted file mode 100644 index 1c5f62ecc..000000000 --- a/backend/tests/test_services/test_copilot_run_reducer.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Tests for copilot run projection reducer.""" - -from app.services.run_reducers.copilot import apply_copilot_event, make_initial_projection - - -def _base(): - return make_initial_projection({"graph_id": "g1", "mode": "deepagents"}, "queued") - - -def test_copilot_definition_registered(): - from app.services.agent_registry import agent_registry - - defn = agent_registry.get("copilot") - assert defn.agent_name == "copilot" - assert defn.run_type == "copilot_turn" - - -def test_copilot_initial_projection(): - p = _base() - assert p["run_type"] == "copilot_turn" - assert p["status"] == "queued" - assert p["graph_id"] == "g1" - assert p["mode"] == "deepagents" - assert p["content"] == "" - assert p["thought_steps"] == [] - - -def test_copilot_reducer_run_initialized(): - p = apply_copilot_event( - None, event_type="run_initialized", payload={"graph_id": "g2", "mode": "standard"}, status="running" - ) - assert p["graph_id"] == "g2" - assert p["mode"] == "standard" - - -def test_copilot_reducer_status(): - p = apply_copilot_event( - _base(), event_type="status", payload={"stage": "thinking", "message": "Thinking..."}, status="running" - ) - assert p["stage"] == "thinking" - - -def test_copilot_reducer_content_delta(): - p = apply_copilot_event(_base(), event_type="content_delta", payload={"delta": "Hello "}, status="running") - p = apply_copilot_event(p, event_type="content_delta", payload={"delta": "world"}, status="running") - assert p["content"] == "Hello world" - - -def test_copilot_reducer_thought_step(): - p = apply_copilot_event( - _base(), event_type="thought_step", payload={"step": {"index": 1, "content": "Analyzing"}}, status="running" - ) - assert len(p["thought_steps"]) == 1 - assert p["thought_steps"][0]["content"] == "Analyzing" - - -def test_copilot_reducer_tool_call(): - p = apply_copilot_event( - _base(), event_type="tool_call", payload={"tool": "create_node", "input": {"type": "agent"}}, status="running" - ) - assert len(p["tool_calls"]) == 1 - assert p["tool_calls"][0]["tool"] == "create_node" - - -def test_copilot_reducer_tool_result(): - action = {"type": "CREATE_NODE", "payload": {"id": "n1"}, "reasoning": "Need agent"} - p = apply_copilot_event(_base(), event_type="tool_result", payload={"action": action}, status="running") - assert len(p["tool_results"]) == 1 - assert p["tool_results"][0]["type"] == "CREATE_NODE" - - -def test_copilot_reducer_result(): - actions = [{"type": "CREATE_NODE", "payload": {"id": "n1"}, "reasoning": "test"}] - p = apply_copilot_event( - _base(), event_type="result", payload={"message": "Done!", "actions": actions}, status="running" - ) - assert p["result_message"] == "Done!" 
- assert len(p["result_actions"]) == 1 - - -def test_copilot_reducer_error(): - p = apply_copilot_event( - _base(), event_type="error", payload={"message": "LLM failed", "code": "AGENT_ERROR"}, status="failed" - ) - assert p["status"] == "failed" - assert p["error"] == "LLM failed" - - -def test_copilot_reducer_done(): - p = apply_copilot_event(_base(), event_type="done", payload={}, status="completed") - assert p["status"] == "completed" - - -def test_copilot_reducer_done_preserves_failed(): - p = apply_copilot_event(_base(), event_type="error", payload={"message": "err"}, status="failed") - p = apply_copilot_event(p, event_type="done", payload={}, status="failed") - assert p["status"] == "failed" diff --git a/backend/tests/test_services/test_platform_token_service.py b/backend/tests/test_services/test_platform_token_service.py deleted file mode 100644 index d593ba8fc..000000000 --- a/backend/tests/test_services/test_platform_token_service.py +++ /dev/null @@ -1,257 +0,0 @@ -"""Unit tests for PlatformTokenService.""" - -from __future__ import annotations - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - - -def _mock_db(): - db = AsyncMock() - db.commit = AsyncMock() - db.flush = AsyncMock() - db.refresh = AsyncMock() - db.add = MagicMock() - return db - - -class TestPlatformTokenServiceCreate: - """Test token creation logic.""" - - @pytest.mark.asyncio - async def test_create_returns_raw_token_starting_with_sk(self): - """Created token should start with 'sk_' prefix.""" - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - record, raw_token = await service.create_token( - user_id="user-1", - name="test", - scopes=["skills:read"], - ) - assert raw_token.startswith("sk_") - assert len(raw_token) > 12 - - @pytest.mark.asyncio - async def test_create_rejects_when_limit_exceeded(self): - """Should raise BadRequestException when user has 50 active tokens.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=50) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - with pytest.raises(BadRequestException, match="50"): - await service.create_token( - user_id="user-1", - name="test", - scopes=["skills:read"], - ) - - @pytest.mark.asyncio - async def test_create_rejects_invalid_scope(self): - """Should raise BadRequestException when passing invalid scope.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - with pytest.raises(BadRequestException, match="Invalid scopes"): - await service.create_token( - user_id="user-1", - name="test", - scopes=["invalid:scope"], - ) - - -class TestPlatformTokenServiceRevoke: - """Test token revocation logic.""" - - @pytest.mark.asyncio - async def test_revoke_sets_inactive(self): - """Revoke should set is_active to False.""" - db = _mock_db() - token = 
MagicMock() - token.id = uuid.uuid4() - token.user_id = "user-1" - token.is_active = True - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.get = AsyncMock(return_value=token) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - await service.revoke_token(token_id=token.id, user_id="user-1") - assert token.is_active is False - - @pytest.mark.asyncio - async def test_revoke_wrong_user_denied(self): - """Should raise ForbiddenException when revoking another user's token.""" - from app.common.exceptions import ForbiddenException - - db = _mock_db() - token = MagicMock() - token.id = uuid.uuid4() - token.user_id = "user-1" - token.is_active = True - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.get = AsyncMock(return_value=token) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - with pytest.raises(ForbiddenException): - await service.revoke_token(token_id=token.id, user_id="user-2") - - -class TestPlatformTokenServiceList: - """Test token listing with resource filtering.""" - - @pytest.mark.asyncio - async def test_list_tokens_delegates_to_repo(self): - """list_tokens should pass resource filters to repository.""" - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - mock_tokens = [MagicMock(), MagicMock()] - MockRepo.return_value.list_by_user_and_resource = AsyncMock(return_value=mock_tokens) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - rid = uuid.uuid4() - result = await service.list_tokens( - user_id="user-1", - resource_type="skill", - resource_id=rid, - ) - assert result == mock_tokens - MockRepo.return_value.list_by_user_and_resource.assert_called_once_with("user-1", "skill", rid) - - @pytest.mark.asyncio - async def test_list_tokens_no_filters(self): - """list_tokens with no filters passes None values.""" - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.list_by_user_and_resource = AsyncMock(return_value=[]) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - result = await service.list_tokens(user_id="user-1") - assert result == [] - MockRepo.return_value.list_by_user_and_resource.assert_called_once_with("user-1", None, None) - - -class TestPlatformTokenServiceRevokeByResource: - """Test bulk revocation by resource.""" - - @pytest.mark.asyncio - async def test_revoke_by_resource_delegates_to_repo(self): - """revoke_by_resource should delegate to repo.deactivate_by_resource.""" - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.deactivate_by_resource = AsyncMock(return_value=3) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - count = await service.revoke_by_resource("skill", "skill-123") - assert count == 3 - MockRepo.return_value.deactivate_by_resource.assert_called_once_with("skill", "skill-123") - - -class TestPlatformTokenServiceValidation: - """Test resource_type/resource_id pair and resource_type validation.""" - - @pytest.mark.asyncio - async def test_create_rejects_resource_type_without_resource_id(self): - """Should reject when 
resource_type is set but resource_id is None.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - with pytest.raises(BadRequestException, match="must both be provided"): - await service.create_token( - user_id="user-1", - name="test", - scopes=["skills:read"], - resource_type="skill", - resource_id=None, - ) - - @pytest.mark.asyncio - async def test_create_rejects_resource_id_without_resource_type(self): - """Should reject when resource_id is set but resource_type is None.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - with pytest.raises(BadRequestException, match="must both be provided"): - await service.create_token( - user_id="user-1", - name="test", - scopes=["skills:read"], - resource_type=None, - resource_id=uuid.uuid4(), - ) - - @pytest.mark.asyncio - async def test_create_rejects_invalid_resource_type(self): - """Should reject unknown resource_type.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo: - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0) - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - with pytest.raises(BadRequestException, match="Invalid resource_type"): - await service.create_token( - user_id="user-1", - name="test", - scopes=["skills:read"], - resource_type="unknown", - resource_id=uuid.uuid4(), - ) - - @pytest.mark.asyncio - async def test_create_accepts_valid_resource_binding(self): - """Should succeed with valid resource_type + resource_id pair.""" - db = _mock_db() - skill_id = uuid.uuid4() - with ( - patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo, - patch("app.repositories.skill.SkillRepository.get", new_callable=AsyncMock) as mock_get, - patch("app.common.skill_permissions.check_skill_access", new_callable=AsyncMock) as mock_check, - ): - MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0) - mock_skill = MagicMock() - mock_skill.id = skill_id - mock_skill.owner_id = "user-1" - mock_get.return_value = mock_skill - mock_check.return_value = None - from app.services.platform_token_service import PlatformTokenService - - service = PlatformTokenService(db) - record, raw_token = await service.create_token( - user_id="user-1", - name="test", - scopes=["skills:execute"], - resource_type="skill", - resource_id=skill_id, - ) - assert raw_token.startswith("sk_") diff --git a/backend/tests/test_services/test_run_reducers.py b/backend/tests/test_services/test_run_reducers.py deleted file mode 100644 index b8e3cfde3..000000000 --- a/backend/tests/test_services/test_run_reducers.py +++ /dev/null @@ -1,38 +0,0 @@ -from app.services.agent_registry import agent_registry - - -def test_skill_creator_definition_builds_initial_projection() -> None: - definition = agent_registry.get("skill_creator") - - projection = 
definition.make_initial_projection( - { - "graph_id": "graph-123", - "thread_id": "thread-456", - "edit_skill_id": "skill-789", - }, - status="queued", - ) - - assert projection["run_type"] == "skill_creator" - assert projection["status"] == "queued" - assert projection["graph_id"] == "graph-123" - assert projection["thread_id"] == "thread-456" - assert projection["edit_skill_id"] == "skill-789" - - -def test_skill_creator_definition_reducer_updates_preview_payload() -> None: - definition = agent_registry.get("skill_creator") - projection = definition.make_initial_projection({}, status="running") - - next_projection = definition.reducer( - projection, - event_type="tool_end", - payload={ - "message_id": "msg-ai-1", - "tool_name": "preview_skill", - "tool_output": {"name": "network-scan"}, - }, - status="running", - ) - - assert next_projection["preview_data"] == {"name": "network-scan"} diff --git a/backend/tests/test_services/test_skill_permissions.py b/backend/tests/test_services/test_skill_permissions.py deleted file mode 100644 index 018f8c5c9..000000000 --- a/backend/tests/test_services/test_skill_permissions.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Tests for check_skill_access unified permission check.""" - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - -from app.common.exceptions import ForbiddenException -from app.common.skill_permissions import check_skill_access -from app.models.skill_collaborator import CollaboratorRole - - -def _make_skill(owner_id="owner-1", is_public=False): - skill = MagicMock() - skill.id = uuid.uuid4() - skill.owner_id = owner_id - skill.is_public = is_public - return skill - - -@pytest.mark.asyncio -async def test_superuser_always_passes(): - skill = _make_skill(owner_id="other") - db = AsyncMock() - await check_skill_access(db, skill, "super-1", CollaboratorRole.admin, is_superuser=True) - - -@pytest.mark.asyncio -async def test_owner_always_passes(): - skill = _make_skill(owner_id="owner-1") - db = AsyncMock() - await check_skill_access(db, skill, "owner-1", CollaboratorRole.admin) - - -@pytest.mark.asyncio -async def test_collaborator_with_sufficient_role(): - skill = _make_skill(owner_id="other") - db = AsyncMock() - mock_collab = MagicMock() - mock_collab.role = CollaboratorRole.editor - with patch("app.common.skill_permissions._get_collaborator", return_value=mock_collab): - await check_skill_access(db, skill, "user-1", CollaboratorRole.editor) - - -@pytest.mark.asyncio -async def test_collaborator_with_insufficient_role(): - skill = _make_skill(owner_id="other") - db = AsyncMock() - mock_collab = MagicMock() - mock_collab.role = CollaboratorRole.viewer - with patch("app.common.skill_permissions._get_collaborator", return_value=mock_collab): - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "user-1", CollaboratorRole.editor) - - -@pytest.mark.asyncio -async def test_public_skill_viewer_access(): - skill = _make_skill(owner_id="other", is_public=True) - db = AsyncMock() - with patch("app.common.skill_permissions._get_collaborator", return_value=None): - await check_skill_access(db, skill, "user-1", CollaboratorRole.viewer) - - -@pytest.mark.asyncio -async def test_public_skill_editor_access_denied(): - skill = _make_skill(owner_id="other", is_public=True) - db = AsyncMock() - with patch("app.common.skill_permissions._get_collaborator", return_value=None): - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "user-1", CollaboratorRole.editor) - - 
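
The access-check tests above encode a simple ordering: superusers and the owner always pass, a collaborator needs a role at or above the required one, and a public skill grants at most viewer access. A minimal sketch of that ordering, with hypothetical names (the token-scope checks exercised just below are omitted):

```python
# Illustrative sketch only; enum values and has_access are assumptions for
# the example, not the project's check_skill_access implementation.
from enum import IntEnum


class Role(IntEnum):
    viewer = 1
    editor = 2
    admin = 3


def has_access(
    is_superuser: bool,
    is_owner: bool,
    collaborator_role: Role | None,
    skill_is_public: bool,
    required: Role,
) -> bool:
    # Superusers and the skill owner bypass all other checks.
    if is_superuser or is_owner:
        return True
    # Collaborators pass when their role meets or exceeds the requirement.
    if collaborator_role is not None:
        return collaborator_role >= required
    # Public skills grant read-only access to everyone else.
    return skill_is_public and required == Role.viewer
```

Modeling roles as an `IntEnum` makes the "sufficient role" comparison a plain `>=`, which is exactly what the editor-vs-viewer cases above assert.
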
-@pytest.mark.asyncio -async def test_token_scope_check_passes(): - skill = _make_skill(owner_id="owner-1") - db = AsyncMock() - await check_skill_access( - db, - skill, - "owner-1", - CollaboratorRole.viewer, - token_scopes=["skills:read"], - required_scope="skills:read", - ) - - -@pytest.mark.asyncio -async def test_token_scope_check_fails(): - skill = _make_skill(owner_id="owner-1") - db = AsyncMock() - with pytest.raises(ForbiddenException): - await check_skill_access( - db, - skill, - "owner-1", - CollaboratorRole.viewer, - token_scopes=["skills:read"], - required_scope="skills:write", - ) diff --git a/backend/tests/test_services/test_skill_version_service.py b/backend/tests/test_services/test_skill_version_service.py deleted file mode 100644 index c9e2aeca3..000000000 --- a/backend/tests/test_services/test_skill_version_service.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Unit tests for SkillVersionService.""" - -from __future__ import annotations - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - -from app.services.skill_version_service import SkillVersionService - - -def _mock_skill(owner_id="user-1", name="test-skill", content="body"): - s = MagicMock() - s.id = uuid.uuid4() - s.owner_id = owner_id - s.name = name - s.description = "desc" - s.content = content - s.tags = ["a"] - s.meta_data = {} - s.allowed_tools = [] - s.compatibility = None - s.license = None - s.is_public = False - s.files = [] - return s - - -def _mock_db(): - db = AsyncMock() - db.commit = AsyncMock() - db.flush = AsyncMock() - db.refresh = AsyncMock() - db.add = MagicMock() - return db - - -class TestSkillVersionServicePublish: - """Test version publishing logic.""" - - @pytest.mark.asyncio - async def test_publish_validates_semver_format(self): - """Invalid semver should raise BadRequestException.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - skill = _mock_skill() - with patch.object(SkillVersionService, "__init__", lambda self, db: None): - service = SkillVersionService.__new__(SkillVersionService) - service.db = db - service.skill_repo = MagicMock() - service.skill_repo.get_with_files = AsyncMock(return_value=skill) - service.repo = MagicMock() - service.file_repo = MagicMock() - service.skill_file_repo = MagicMock() - - with patch("app.services.skill_version_service.check_skill_access", new_callable=AsyncMock): - with pytest.raises(BadRequestException, match="Invalid version"): - await service.publish_version( - skill_id=skill.id, - current_user_id="user-1", - version_str="invalid", - release_notes="", - ) - - @pytest.mark.asyncio - async def test_publish_rejects_lower_version(self): - """New version must be greater than existing highest.""" - from app.common.exceptions import BadRequestException - - db = _mock_db() - skill = _mock_skill() - - with patch.object(SkillVersionService, "__init__", lambda self, db: None): - service = SkillVersionService.__new__(SkillVersionService) - service.db = db - service.skill_repo = MagicMock() - service.skill_repo.get_with_files = AsyncMock(return_value=skill) - service.repo = MagicMock() - service.repo.get_highest_version_str = AsyncMock(return_value="2.0.0") - service.file_repo = MagicMock() - service.skill_file_repo = MagicMock() - - with patch("app.services.skill_version_service.check_skill_access", new_callable=AsyncMock): - with pytest.raises(BadRequestException, match="greater"): - await service.publish_version( - skill_id=skill.id, - current_user_id="user-1", - version_str="1.0.0", - 
release_notes="", - ) diff --git a/backend/tests/test_tools/__init__.py b/backend/tests/test_tools/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/test_tools/test_preview_skill.py b/backend/tests/test_tools/test_preview_skill.py deleted file mode 100644 index 964e9291d..000000000 --- a/backend/tests/test_tools/test_preview_skill.py +++ /dev/null @@ -1,304 +0,0 @@ -"""Tests for preview_skill_in_sandbox builtin tool.""" - -import json -from pathlib import Path - -import pytest - -from app.core.tools.builtin.preview_skill import preview_skill_in_sandbox - - -@pytest.fixture -def sandbox_root(tmp_path: Path) -> str: - """Create a temporary sandbox root directory.""" - return str(tmp_path) - - -def _make_skill(sandbox_root: str, skill_name: str, files: dict[str, str], skills_subdir: str = "skills") -> Path: - """Helper to create a skill directory with given files. - - Args: - sandbox_root: Root of the sandbox. - skill_name: Name of the skill directory. - files: Mapping of relative file path -> content. - skills_subdir: Subdirectory under sandbox_root for skills. - - Returns: - Path to the created skill directory. - """ - skill_dir = Path(sandbox_root) / skills_subdir / skill_name - for rel_path, content in files.items(): - file_path = skill_dir / rel_path - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text(content, encoding="utf-8") - return skill_dir - - -# --------------------------------------------------------------------------- -# Happy path -# --------------------------------------------------------------------------- - - -def test_valid_skill_returns_correct_structure(sandbox_root: str): - """A well-formed skill with SKILL.md should pass validation.""" - skill_md = ( - "---\nname: hello-world\ndescription: A simple greeting skill\n---\n\n# Hello World\nThis skill says hello.\n" - ) - _make_skill( - sandbox_root, - "hello-world", - { - "SKILL.md": skill_md, - "main.py": "print('hello')\n", - }, - ) - - result_str = preview_skill_in_sandbox("hello-world", sandbox_root) - result = json.loads(result_str) - - assert result["skill_name"] == "hello-world" - assert result["validation"]["valid"] is True - assert result["validation"]["errors"] == [] - - # Check files are included - file_paths = [f["path"] for f in result["files"]] - assert "SKILL.md" in file_paths - assert "main.py" in file_paths - - # Check file metadata - skill_md_file = next(f for f in result["files"] if f["path"] == "SKILL.md") - assert skill_md_file["file_type"] == "markdown" - assert skill_md_file["size"] > 0 - assert "Hello World" in skill_md_file["content"] - - main_py_file = next(f for f in result["files"] if f["path"] == "main.py") - assert main_py_file["file_type"] == "python" - - -def test_valid_skill_with_nested_files(sandbox_root: str): - """Nested subdirectory files should be included with relative paths.""" - skill_md = "---\nname: nested-skill\ndescription: Skill with nested files\n---\n\nBody text.\n" - _make_skill( - sandbox_root, - "nested-skill", - { - "SKILL.md": skill_md, - "src/utils.py": "def helper(): pass\n", - "src/data/config.json": '{"key": "value"}\n', - }, - ) - - result = json.loads(preview_skill_in_sandbox("nested-skill", sandbox_root)) - - assert result["validation"]["valid"] is True - paths = sorted(f["path"] for f in result["files"]) - assert "SKILL.md" in paths - assert "src/utils.py" in paths - assert "src/data/config.json" in paths - - json_file = next(f for f in result["files"] if f["path"] == "src/data/config.json") - 
assert json_file["file_type"] == "json" - - -def test_custom_skills_subdir(sandbox_root: str): - """Should respect a custom skills_subdir parameter.""" - skill_md = "---\nname: custom-dir\ndescription: test\n---\nBody.\n" - _make_skill(sandbox_root, "custom-dir", {"SKILL.md": skill_md}, skills_subdir="my_skills") - - result = json.loads(preview_skill_in_sandbox("custom-dir", sandbox_root, skills_subdir="my_skills")) - assert result["validation"]["valid"] is True - assert result["skill_name"] == "custom-dir" - - -def test_default_lookup_finds_thread_scoped_skill_dir(sandbox_root: str): - """Default lookup should find a skill under /skills when unambiguous.""" - skill_md = "---\nname: thread-dir\ndescription: test\n---\nBody.\n" - _make_skill(sandbox_root, "thread-dir", {"SKILL.md": skill_md}, skills_subdir="thread-123/skills") - - result = json.loads(preview_skill_in_sandbox("thread-dir", sandbox_root)) - - assert result["validation"]["valid"] is True - assert result["skill_name"] == "thread-dir" - - -# --------------------------------------------------------------------------- -# File type detection -# --------------------------------------------------------------------------- - - -def test_file_type_detection(sandbox_root: str): - """Various file extensions should map to correct file_type values.""" - skill_md = "---\nname: types-test\ndescription: test types\n---\nBody.\n" - _make_skill( - sandbox_root, - "types-test", - { - "SKILL.md": skill_md, - "script.py": "pass", - "config.yaml": "key: val", - "config.yml": "key: val", - "data.json": "{}", - "readme.txt": "hello", - "run.sh": "#!/bin/bash", - "style.css": "body {}", - "page.html": "", - "app.js": "console.log(1)", - "app.ts": "const x = 1", - "unknown.xyz": "stuff", - }, - ) - - result = json.loads(preview_skill_in_sandbox("types-test", sandbox_root)) - type_map = {f["path"]: f["file_type"] for f in result["files"]} - - assert type_map["script.py"] == "python" - assert type_map["config.yaml"] == "yaml" - assert type_map["config.yml"] == "yaml" - assert type_map["data.json"] == "json" - assert type_map["readme.txt"] == "text" - assert type_map["run.sh"] == "shell" - assert type_map["style.css"] == "css" - assert type_map["page.html"] == "html" - assert type_map["app.js"] == "javascript" - assert type_map["app.ts"] == "typescript" - assert type_map["unknown.xyz"] == "other" - - -# --------------------------------------------------------------------------- -# Error cases -# --------------------------------------------------------------------------- - - -def test_skill_directory_not_found(sandbox_root: str): - """Missing skill directory should produce valid=False with error.""" - result = json.loads(preview_skill_in_sandbox("nonexistent", sandbox_root)) - - assert result["skill_name"] == "nonexistent" - assert result["validation"]["valid"] is False - assert any("not found" in e.lower() for e in result["validation"]["errors"]) - assert result["files"] == [] - - -def test_missing_skill_md(sandbox_root: str): - """A skill directory without SKILL.md should produce valid=False.""" - _make_skill( - sandbox_root, - "no-readme", - { - "main.py": "print('hi')\n", - }, - ) - - result = json.loads(preview_skill_in_sandbox("no-readme", sandbox_root)) - - assert result["validation"]["valid"] is False - assert any("SKILL.md" in e for e in result["validation"]["errors"]) - # Other files should still be listed - assert len(result["files"]) == 1 - - -def test_bad_yaml_frontmatter(sandbox_root: str): - """Invalid YAML frontmatter should produce 
valid=False.""" - bad_skill_md = "---\nname: [invalid yaml\n---\nBody.\n" - _make_skill(sandbox_root, "bad-yaml", {"SKILL.md": bad_skill_md}) - - result = json.loads(preview_skill_in_sandbox("bad-yaml", sandbox_root)) - - assert result["validation"]["valid"] is False - assert any("frontmatter" in e.lower() or "name" in e.lower() for e in result["validation"]["errors"]) - - -def test_missing_name_in_frontmatter(sandbox_root: str): - """Frontmatter without 'name' should produce a validation error.""" - skill_md = "---\ndescription: no name here\n---\nBody.\n" - _make_skill(sandbox_root, "no-name", {"SKILL.md": skill_md}) - - result = json.loads(preview_skill_in_sandbox("no-name", sandbox_root)) - - assert result["validation"]["valid"] is False - assert any("name" in e.lower() for e in result["validation"]["errors"]) - - -def test_missing_description_in_frontmatter(sandbox_root: str): - """Frontmatter without 'description' should produce a validation error.""" - skill_md = "---\nname: no-desc\n---\nBody.\n" - _make_skill(sandbox_root, "no-desc", {"SKILL.md": skill_md}) - - result = json.loads(preview_skill_in_sandbox("no-desc", sandbox_root)) - - assert result["validation"]["valid"] is False - assert any("description" in e.lower() for e in result["validation"]["errors"]) - - -def test_invalid_skill_name_format(sandbox_root: str): - """Skill name that violates naming rules should produce validation error.""" - skill_md = "---\nname: Invalid_Name!\ndescription: bad name\n---\nBody.\n" - _make_skill(sandbox_root, "bad-name", {"SKILL.md": skill_md}) - - result = json.loads(preview_skill_in_sandbox("bad-name", sandbox_root)) - - assert result["validation"]["valid"] is False - assert any("name" in e.lower() for e in result["validation"]["errors"]) - - -def test_description_too_long(sandbox_root: str): - """Description exceeding max length should produce validation error.""" - long_desc = "x" * 1025 - skill_md = f"---\nname: long-desc\ndescription: {long_desc}\n---\nBody.\n" - _make_skill(sandbox_root, "long-desc", {"SKILL.md": skill_md}) - - result = json.loads(preview_skill_in_sandbox("long-desc", sandbox_root)) - - assert result["validation"]["valid"] is False - assert any("description" in e.lower() for e in result["validation"]["errors"]) - - -# --------------------------------------------------------------------------- -# Warnings -# --------------------------------------------------------------------------- - - -def test_empty_body_produces_warning(sandbox_root: str): - """SKILL.md with empty body (no content after frontmatter) should warn.""" - skill_md = "---\nname: empty-body\ndescription: has no body\n---\n" - _make_skill(sandbox_root, "empty-body", {"SKILL.md": skill_md}) - - result = json.loads(preview_skill_in_sandbox("empty-body", sandbox_root)) - - # Still valid, but warning present - assert result["validation"]["valid"] is True - assert any("body" in w.lower() or "empty" in w.lower() for w in result["validation"]["warnings"]) - - -# --------------------------------------------------------------------------- -# Edge cases -# --------------------------------------------------------------------------- - - -def test_system_files_are_excluded(sandbox_root: str): - """System files like .DS_Store should be excluded from file list.""" - skill_md = "---\nname: sys-files\ndescription: test system files\n---\nBody.\n" - _make_skill( - sandbox_root, - "sys-files", - { - "SKILL.md": skill_md, - ".DS_Store": "binary garbage", - "__pycache__/module.pyc": "bytecode", - }, - ) - - result = 
json.loads(preview_skill_in_sandbox("sys-files", sandbox_root)) - file_paths = [f["path"] for f in result["files"]] - - assert ".DS_Store" not in file_paths - # __pycache__ files should also be filtered - assert not any("__pycache__" in p for p in file_paths) - - -def test_returns_valid_json_string(sandbox_root: str): - """The function should always return a valid JSON string.""" - # Even for missing skill - result_str = preview_skill_in_sandbox("missing", sandbox_root) - assert isinstance(result_str, str) - json.loads(result_str) # Should not raise diff --git a/backend/tests/test_utils/__init__.py b/backend/tests/test_utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/backend/tests/test_utils/test_file_event_emitter.py b/backend/tests/test_utils/test_file_event_emitter.py deleted file mode 100644 index 8ba7c895c..000000000 --- a/backend/tests/test_utils/test_file_event_emitter.py +++ /dev/null @@ -1,41 +0,0 @@ -from app.utils.file_event_emitter import FileEventEmitter - - -def test_emit_and_drain(): - emitter = FileEventEmitter() - emitter.emit("write", "/app/hello.py", size=42) - emitter.emit("edit", "/app/hello.py") - events = emitter.drain() - assert len(events) == 2 - assert events[0].action == "write" - assert events[0].path == "/app/hello.py" - assert events[0].size == 42 - assert events[1].action == "edit" - assert events[1].size is None - - -def test_drain_empties_queue(): - emitter = FileEventEmitter() - emitter.emit("write", "/app/a.py") - emitter.drain() - assert emitter.drain() == [] - - -def test_drain_no_loss_under_interleave(): - """Simulate emit during drain - popleft loop should not lose events.""" - emitter = FileEventEmitter() - emitter.emit("write", "/app/a.py") - emitter.emit("write", "/app/b.py") - events = emitter.drain() - assert len(events) == 2 - emitter.emit("write", "/app/c.py") - events2 = emitter.drain() - assert len(events2) == 1 - assert events2[0].path == "/app/c.py" - - -def test_file_event_has_timestamp(): - emitter = FileEventEmitter() - emitter.emit("write", "/app/a.py") - events = emitter.drain() - assert events[0].timestamp > 0 diff --git a/backend/uv.lock b/backend/uv.lock index 32f34e043..1b58ddf2a 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -71,28 +71,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, ] -[[package]] -name = "aioquic" -version = "1.2.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "certifi" }, - { name = "cryptography" }, - { name = "pylsqpack" }, - { name = "pyopenssl" }, - { name = "service-identity" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4b/1a/bf10b2c57c06c7452b685368cb1ac90565a6e686e84ec6f84465fb8f78f4/aioquic-1.2.0.tar.gz", hash = "sha256:f91263bb3f71948c5c8915b4d50ee370004f20a416f67fab3dcc90556c7e7199", size = 179891, upload-time = "2024-07-06T23:27:09.301Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/03/1c385739e504c70ab2a66a4bc0e7cd95cee084b374dcd4dc97896378400b/aioquic-1.2.0-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3e23964dfb04526ade6e66f5b7cd0c830421b8138303ab60ba6e204015e7cb0b", size = 1753473, upload-time = "2024-07-06T23:26:20.809Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/1f/4d1c40714db65be828e1a1e2cce7f8f4b252be67d89f2942f86a1951826c/aioquic-1.2.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:84d733332927b76218a3b246216104116f766f5a9b2308ec306cd017b3049660", size = 2083563, upload-time = "2024-07-06T23:26:24.254Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/15/48/56a8c9083d1deea4ccaf1cbf5a91a396b838b4a0f8650f4e9f45c7879a38/aioquic-1.2.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2466499759b31ea4f1d17f4aeb1f8d4297169e05e3c1216d618c9757f4dd740d", size = 2555697, upload-time = "2024-07-06T23:26:26.16Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0f/93/fa4c981a8a8a903648d4cd6e12c0fca7f44e3ef4ba15a8b99a26af05b868/aioquic-1.2.0-cp38-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd75015462ca5070a888110dc201f35a9f4c7459f9201b77adc3c06013611bb8", size = 2149089, upload-time = "2024-07-06T23:26:28.277Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b0/0f/4a280923313b831892caaa45348abea89e7dd2e4422a86699bb0e506b1dd/aioquic-1.2.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43ae3b11d43400a620ca0b4b4885d12b76a599c2cbddba755f74bebfa65fe587", size = 2205221, upload-time = "2024-07-06T23:26:30.682Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d2/6b/a6a1d1762ce06f13b68f524bb9c5f4d6ca7cda9b072d7e744626b89b77be/aioquic-1.2.0-cp38-abi3-win32.whl", hash = "sha256:910d8c91da86bba003d491d15deaeac3087d1b9d690b9edc1375905d8867b742", size = 1214037, upload-time = "2024-07-06T23:26:32.651Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dd/aa/e8a8a75c93dee0ab229df3c2d17f63cd44d0ad5ee8540e2ec42779ce3a39/aioquic-1.2.0-cp38-abi3-win_amd64.whl", hash = "sha256:e3dcfb941004333d477225a6689b55fc7f905af5ee6a556eb5083be0354e653a", size = 1530339, upload-time = "2024-07-06T23:26:34.753Z" }, -] - [[package]] name = "aiosignal" version = "1.4.0" @@ -129,54 +107,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ba/88/6237e97e3385b57b5f1528647addea5cc03d4d65d5979ab24327d41fb00d/alembic-1.17.2-py3-none-any.whl", hash = "sha256:f483dd1fe93f6c5d49217055e4d15b905b425b6af906746abb35b69c1996c4e6", size = 248554, upload-time = "2025-11-14T20:35:05.699Z" }, ] -[[package]] -name = "angr" -version = "9.2.191" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "archinfo" }, - { name = "cachetools" }, - { name = "capstone" }, - { name = "cffi" }, - { name = "claripy" }, - { name = "cle" }, - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "cxxheaderparser" }, - { name = "gitpython" }, - { name = "msgspec" }, - { name = "mulpyplexer" }, - { name = "networkx" }, - { name = "protobuf" }, - { name = "psutil" }, - { name = "pycparser" }, - { name = "pydemumble" }, - { name = "pyformlang" }, - { name = "pypcode" }, - { name = "pyvex" }, - { name = "rich" }, - { name = "sortedcontainers" }, - { name = "sympy" }, - { name = "typing-extensions" }, - { name = "unique-log-filter" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/bd/9ed2df50022dd815386d150b1ae2a35279bd445aa56c79768fc1c0f84872/angr-9.2.191.tar.gz", hash = "sha256:64105292abbfe39bec53f3165231e955d84e19fad48d1aec45c0d0194786469a", size = 3353580, upload-time = "2025-12-30T17:44:24.144Z" } -wheels = [ - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ab/70/a06ce0887923c1bc2461f6840bbb4a571ac3f9297d45f622a0cf45d39a4c/angr-9.2.191-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e4df766688bbdacdd053ccae6faf577033af750e4b5b5027f8a90bb08310a084", size = 5989071, upload-time = "2025-12-30T17:42:57.589Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6b/56/17cd22f6bc110b125c0e4bbc7cd04b0bafa607de2a49883bf34233814d51/angr-9.2.191-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:362b8f76c3f7bcd09b0359132065f469af78123c8c92b5866a3db3446dabc707", size = 5819341, upload-time = "2025-12-30T17:42:59.391Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/66/b3/7c5f2cc85eb05b862035d8edb6426acd1fb8426f0b4c7e5793c924e0e669/angr-9.2.191-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03d21f0e5f25629eae63910a62029d9075165f3b4ae46cda459d245aa12e6e30", size = 6185672, upload-time = "2025-12-30T17:43:01.386Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/23/bf/0a2fffced77260ca321af7509fc8379c239c8a621c1bcd21a26a67cdf183/angr-9.2.191-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3ff7b42213650cc4d81a15c435279bfbf7276ada401c1c1056e3fe602d629f27", size = 6011388, upload-time = "2025-12-30T17:43:03.27Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/44/e0/8af0259222bd4d545867991be8ba4a3d4f00ab942bb5687494b29e4db247/angr-9.2.191-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dee437c32cdd8bac3f34f71afa97c5b7b36d92dd4757a342b6569fa0555bbe6b", size = 6992468, upload-time = "2025-12-30T17:43:05.146Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/17/ba/7be963318e037f32572fcd33ed9b1cbe504c6bdb0b10e7e8ae983f2b4ec2/angr-9.2.191-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ec21fe4de896cca191e083b40c442ebe96075b24e0eaf58a1d53d8b1a5e8021f", size = 7177599, upload-time = "2025-12-30T17:43:07.11Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5f/79/324dc9e2d27b37c54cbff7fd671d33e1b0803fbe78ae1907849bd05a8aa7/angr-9.2.191-cp312-cp312-win_amd64.whl", hash = "sha256:81aa44fd30ccc8f00a20c1f8d695236372ac8c5a1dd5b745653f0909f8a345bc", size = 5789378, upload-time = "2025-12-30T17:43:08.57Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/fa/baabac8ada517e16fa70e7384e6d99cbfb5919cf3dca7c109d98d0a4f090/angr-9.2.191-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3c2ccc3ea148851c18eda4546dada965abea40bd703a9a68fae4002808d81f0a", size = 5989143, upload-time = "2025-12-30T17:43:10.235Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6b/1f/00ec8f2ebb6b2134ef697bf63c5488657922a395e0dff49553d530dbce3b/angr-9.2.191-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:07d01dff30f0f9bd7cfc55e8460ec7936e977b7f01e567aae69f3c82842074ec", size = 5819108, upload-time = "2025-12-30T17:43:11.707Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/63/49/f5ad08fb8b203f9185c5b65572510aacb133ae38f4ff56cfa45f43ada3b1/angr-9.2.191-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d91335e18c9d873a7334195b32cfd08cbc3a089d1f5b38ee3ba0f19bf3c29f11", size = 6185183, upload-time = "2025-12-30T17:43:13.24Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/84/cb/eb474e665a74a38c0769cd5028ec6ce555655fdbd239f67e12ed6cf5f61d/angr-9.2.191-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c27e349e6f2cb521264f5e7bc9e01ac7a2e313b2ee0174c5fc8f0af3ffd88e70", 
size = 6011554, upload-time = "2025-12-30T17:43:15.085Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/39/da/a5102669a633ded0951ef38a9571e4758c468d36d615fbb8566327d29e65/angr-9.2.191-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:322732ede7c289f656fb5d69282b37f3aead8e91b326e9aabe0f1f7acddf1e15", size = 6991995, upload-time = "2025-12-30T17:43:17.013Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d2/4f/9ff9737f70cb386acfd3a7876c3b3b9c7a74bbe581dfc9e301fb5070f06c/angr-9.2.191-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:26c4c8af3ee5a9bf59a8e498891c234b04e0627b7723be39c8e3dcf6d829adb4", size = 7177527, upload-time = "2025-12-30T17:43:18.966Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/55/de/2b8e0ad0511eaae3dfcd16bcdff7a644f41a3fbdf3ce913ec2193cc691b3/angr-9.2.191-cp313-cp313-win_amd64.whl", hash = "sha256:e590eace461015f01c063dbaa3d780e979a3c47ca1077d7b8b5fd57802466ec6", size = 5789129, upload-time = "2025-12-30T17:43:20.766Z" }, -] - [[package]] name = "annotated-doc" version = "0.0.4" @@ -227,63 +157,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, ] -[[package]] -name = "archinfo" -version = "9.2.191" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/a4/51fa73732b12b5ca88876fd5eeb9bdad95b1bc6d6047f45c2b2a22c46acd/archinfo-9.2.191.tar.gz", hash = "sha256:926d7cd036deb7630250fdaa6ba1b151b9ca18b08ac15b35e9429b7fe13b9eee", size = 39899, upload-time = "2025-12-30T17:44:27.16Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/0a/03817b0729c458986fd92566fa9200a8376e4c6e460845652f8f1e1cc312/archinfo-9.2.191-py3-none-any.whl", hash = "sha256:7e11a73f0f39b94db4e6ad35dedfc4e7c1b51f12accc88cd82b9acf1f0c05044", size = 48977, upload-time = "2025-12-30T17:43:29.367Z" }, -] - -[[package]] -name = "argon2-cffi" -version = "23.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "argon2-cffi-bindings" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/31/fa/57ec2c6d16ecd2ba0cf15f3c7d1c3c2e7b5fcb83555ff56d7ab10888ec8f/argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08", size = 42798, upload-time = "2023-08-15T14:13:12.711Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a4/6a/e8a041599e78b6b3752da48000b14c8d1e8a04ded09c88c714ba047f34f5/argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea", size = 15124, upload-time = "2023-08-15T14:13:10.752Z" }, -] - -[[package]] -name = "argon2-cffi-bindings" -version = "25.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "cffi" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/2d/db8af0df73c1cf454f71b2bbe5e356b8c1f8041c979f505b3d3186e520a9/argon2_cffi_bindings-25.1.0.tar.gz", hash = "sha256:b957f3e6ea4d55d820e40ff76f450952807013d361a65d7f28acc0acbf29229d", size = 1783441, upload-time = "2025-07-30T10:02:05.147Z" } -wheels = [ - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1d/57/96b8b9f93166147826da5f90376e784a10582dd39a393c99bb62cfcf52f0/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:aecba1723ae35330a008418a91ea6cfcedf6d31e5fbaa056a166462ff066d500", size = 54121, upload-time = "2025-07-30T10:01:50.815Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0a/08/a9bebdb2e0e602dde230bdde8021b29f71f7841bd54801bcfd514acb5dcf/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2630b6240b495dfab90aebe159ff784d08ea999aa4b0d17efa734055a07d2f44", size = 29177, upload-time = "2025-07-30T10:01:51.681Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b6/02/d297943bcacf05e4f2a94ab6f462831dc20158614e5d067c35d4e63b9acb/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7aef0c91e2c0fbca6fc68e7555aa60ef7008a739cbe045541e438373bc54d2b0", size = 31090, upload-time = "2025-07-30T10:01:53.184Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/93/44365f3d75053e53893ec6d733e4a5e3147502663554b4d864587c7828a7/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e021e87faa76ae0d413b619fe2b65ab9a037f24c60a1e6cc43457ae20de6dc6", size = 81246, upload-time = "2025-07-30T10:01:54.145Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/09/52/94108adfdd6e2ddf58be64f959a0b9c7d4ef2fa71086c38356d22dc501ea/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e924cfc503018a714f94a49a149fdc0b644eaead5d1f089330399134fa028a", size = 87126, upload-time = "2025-07-30T10:01:55.074Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/70/7a2993a12b0ffa2a9271259b79cc616e2389ed1a4d93842fac5a1f923ffd/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c87b72589133f0346a1cb8d5ecca4b933e3c9b64656c9d175270a000e73b288d", size = 80343, upload-time = "2025-07-30T10:01:56.007Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/9a/4e5157d893ffc712b74dbd868c7f62365618266982b64accab26bab01edc/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1db89609c06afa1a214a69a462ea741cf735b29a57530478c06eb81dd403de99", size = 86777, upload-time = "2025-07-30T10:01:56.943Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/cd/15777dfde1c29d96de7f18edf4cc94c385646852e7c7b0320aa91ccca583/argon2_cffi_bindings-25.1.0-cp39-abi3-win32.whl", hash = "sha256:473bcb5f82924b1becbb637b63303ec8d10e84c8d241119419897a26116515d2", size = 27180, upload-time = "2025-07-30T10:01:57.759Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/c6/a759ece8f1829d1f162261226fbfd2c6832b3ff7657384045286d2afa384/argon2_cffi_bindings-25.1.0-cp39-abi3-win_amd64.whl", hash = "sha256:a98cd7d17e9f7ce244c0803cad3c23a7d379c301ba618a5fa76a67d116618b98", size = 31715, upload-time = "2025-07-30T10:01:58.56Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/42/b9/f8d6fa329ab25128b7e98fd83a3cb34d9db5b059a9847eddb840a0af45dd/argon2_cffi_bindings-25.1.0-cp39-abi3-win_arm64.whl", hash = "sha256:b0fdbcf513833809c882823f98dc2f931cf659d9a1429616ac3adebb49f5db94", size = 27149, upload-time = "2025-07-30T10:01:59.329Z" }, -] - -[[package]] -name = "arpy" -version = "1.1.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d2/15/09fbfc0d43b66a3845098377249539aa44edcd541e17eabe2e11b4e3855e/arpy-1.1.1.tar.gz", hash = "sha256:3ec36309d2234648ef8dcd2118fe7d81c30195087e0353473546583f3434e776", size = 6774, upload-time = "2013-06-30T21:31:27.411Z" } - -[[package]] -name = "asgiref" -version = "3.8.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/29/38/b3395cc9ad1b56d2ddac9970bc8f4141312dbaec28bc7c218b0dfafd0f42/asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590", size = 35186, upload-time = "2024-03-22T14:39:36.863Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/39/e3/893e8757be2612e6c266d9bb58ad2e3651524b5b40cf56761e985a28b13e/asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47", size = 23828, upload-time = "2024-03-22T14:39:34.521Z" }, -] - [[package]] name = "asyncpg" version = "0.31.0" @@ -413,61 +286,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, ] -[[package]] -name = "bitarray" -version = "3.8.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/95/06/92fdc84448d324ab8434b78e65caf4fb4c6c90b4f8ad9bdd4c8021bfaf1e/bitarray-3.8.0.tar.gz", hash = "sha256:3eae38daffd77c9621ae80c16932eea3fb3a4af141fb7cc724d4ad93eff9210d", size = 151991, upload-time = "2025-11-02T21:41:15.117Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/82/a0/0c41d893eda756315491adfdbf9bc928aee3d377a7f97a8834d453aa5de1/bitarray-3.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2fcbe9b3a5996b417e030aa33a562e7e20dfc86271e53d7e841fc5df16268b8", size = 148575, upload-time = "2025-11-02T21:39:25.718Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0e/30/12ab2f4a4429bd844b419c37877caba93d676d18be71354fbbeb21d9f4cc/bitarray-3.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cd761d158f67e288fd0ebe00c3b158095ce80a4bc7c32b60c7121224003ba70d", size = 145454, upload-time = "2025-11-02T21:39:26.695Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/26/58/314b3e3f219533464e120f0c51ac5123e7b1c1b91f725a4073fb70c5a858/bitarray-3.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c394a3f055b49f92626f83c1a0b6d6cd2c628f1ccd72481c3e3c6aa4695f3b20", size = 332949, upload-time = "2025-11-02T21:39:27.801Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/ce/ca8c706bd8341c7a22dd92d2a528af71f7e5f4726085d93f81fd768cb03b/bitarray-3.8.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:969fd67de8c42affdb47b38b80f1eaa79ac0ef17d65407cdd931db1675315af1", size = 360599, upload-time = "2025-11-02T21:39:28.964Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/dc/aa181df85f933052d962804906b282acb433cb9318b08ec2aceb4ee34faf/bitarray-3.8.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:99d25aff3745c54e61ab340b98400c52ebec04290a62078155e0d7eb30380220", size = 371972, upload-time = "2025-11-02T21:39:30.228Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/d9/b805bfa158c7bcf4df0ac19b1be581b47e1ddb792c11023aed80a7058e78/bitarray-3.8.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e645b4c365d6f1f9e0799380ad6395268f3c3b898244a650aaeb8d9d27b74c35", size = 340303, upload-time = "2025-11-02T21:39:31.342Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1f/42/5308cc97ea929e30727292617a3a88293470166851e13c9e3f16f395da55/bitarray-3.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2fa23fdb3beab313950bbb49674e8a161e61449332d3997089fe3944953f1b77", size = 330494, upload-time = "2025-11-02T21:39:32.769Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4c/89/64f1596cb80433323efdbc8dcd0d6e57c40dfbe6ea3341623f34ec397edd/bitarray-3.8.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:165052a0e61c880f7093808a0c524ce1b3555bfa114c0dfb5c809cd07918a60d", size = 358123, upload-time = "2025-11-02T21:39:34.331Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/27/fd/f3d49c5443b57087f888b5e118c8dd78bb7c8e8cfeeed250f8e92128a05f/bitarray-3.8.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:337c8cd46a4c6568d367ed676cbf2d7de16f890bb31dbb54c44c1d6bb6d4a1de", size = 356046, upload-time = "2025-11-02T21:39:35.449Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/db/1fd0b402bd2b47142e958b6930dbb9445235d03fa703c9a24caa6e576ae2/bitarray-3.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21ca6a47bf20db9e7ad74ca04b3d479e4d76109b68333eb23535553d2705339e", size = 336872, upload-time = "2025-11-02T21:39:36.891Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/73/680b47718f1313b4538af479c4732eaca0aeda34d93fc5b869f87932d57d/bitarray-3.8.0-cp312-cp312-win32.whl", hash = "sha256:178c5a4c7fdfb5cd79e372ae7f675390e670f3732e5bc68d327e01a5b3ff8d55", size = 143025, upload-time = "2025-11-02T21:39:38.303Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/11/7792587c19c79a8283e8838f44709fa4338a8f7d2a3091dfd81c07ae89c7/bitarray-3.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:75a3b6e9c695a6570ea488db75b84bb592ff70a944957efa1c655867c575018b", size = 149969, upload-time = "2025-11-02T21:39:39.715Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/00/9df64b5d8a84e8e9ec392f6f9ce93f50626a5b301cb6c6b3fe3406454d66/bitarray-3.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:5591daf81313096909d973fb2612fccd87528fdfdd39f6478bdce54543178954", size = 146907, upload-time = "2025-11-02T21:39:40.815Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3e/35/480364d4baf1e34c79076750914664373f561c58abb5c31c35b3fae613ff/bitarray-3.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18214bac86341f1cc413772e66447d6cca10981e2880b70ecaf4e826c04f95e9", size = 148582, upload-time = "2025-11-02T21:39:42.268Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5e/a8/718b95524c803937f4edbaaf6480f39c80f6ed189d61357b345e8361ffb6/bitarray-3.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:01c5f0dc080b0ebb432f7a68ee1e88a76bd34f6d89c9568fcec65fb16ed71f0e", size = 145433, upload-time = "2025-11-02T21:39:43.552Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/03/66/4a10f30dc9e2e01e3b4ecd44a511219f98e63c86b0e0f704c90fac24059b/bitarray-3.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:86685fa04067f7175f9718489ae755f6acde03593a1a9ca89305554af40e14fd", size = 332986, upload-time = "2025-11-02T21:39:44.656Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/53/25/4c08774d847f80a1166e4c704b4e0f1c417c0afe6306eae0bc5e70d35faa/bitarray-3.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:56896ceeffe25946c4010320629e2d858ca763cd8ded273c81672a5edbcb1e0a", size = 360634, upload-time = "2025-11-02T21:39:45.798Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a5/8f/bf8ad26169ebd0b2746d5c7564db734453ca467f8aab87e9d43b0a794383/bitarray-3.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9858dcbc23ba7eaadcd319786b982278a1a2b2020720b19db43e309579ff76fb", size = 371992, upload-time = "2025-11-02T21:39:46.968Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a9/16/ce166754e7c9d10650e02914552fa637cf3b2591f7ed16632bbf6b783312/bitarray-3.8.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa7dec53c25f1949513457ef8b0ea1fb40e76c672cc4d2daa8ad3c8d6b73491a", size = 340315, upload-time = "2025-11-02T21:39:48.182Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/2a/fbba3a106ddd260e84b9a624f730257c32ba51a8a029565248dfedfdf6f2/bitarray-3.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:15a2eff91f54d2b1f573cca8ca6fb58763ce8fea80e7899ab028f3987ef71cd5", size = 330473, upload-time = "2025-11-02T21:39:49.705Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/68/97/56cf3c70196e7307ad32318a9d6ed969dbdc6a4534bbe429112fa7dfe42e/bitarray-3.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b1572ee0eb1967e71787af636bb7d1eb9c6735d5337762c450650e7f51844594", size = 358129, upload-time = "2025-11-02T21:39:51.189Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/be/afd391a5c0896d3339613321b2f94af853f29afc8bd3fbc327431244c642/bitarray-3.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5bfac7f236ba1a4d402644bdce47fb9db02a7cf3214a1f637d3a88390f9e5428", size = 356005, upload-time = "2025-11-02T21:39:52.355Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ae/08/a8e1a371babba29bad3378bb3a2cdca2b012170711e7fe1f22031a6b7b95/bitarray-3.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f0a55cf02d2cdd739b40ce10c09bbdd520e141217696add7a48b56e67bdfdfe6", size = 336862, upload-time = "2025-11-02T21:39:54.345Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ee/8a/6dc1d0fdc06991c8dc3b1fcfe1ae49fbaced42064cd1b5f24278e73fe05f/bitarray-3.8.0-cp313-cp313-win32.whl", hash = "sha256:a2ba92f59e30ce915e9e79af37649432e3a212ddddf416d4d686b1b4825bcdb2", size = 143018, upload-time = "2025-11-02T21:39:56.361Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2e/72/76e13f5cd23b8b9071747909663ce3b02da24a5e7e22c35146338625db35/bitarray-3.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:1c8f2a5d8006db5a555e06f9437e76bf52537d3dfd130cb8ae2b30866aca32c9", size = 149977, upload-time = "2025-11-02T21:39:57.718Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/01/37/60f336c32336cc3ec03b0c61076f16ea2f05d5371c8a56e802161d218b77/bitarray-3.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:50ddbe3a7b4b6ab96812f5a4d570f401a2cdb95642fd04c062f98939610bbeee", size = 146930, upload-time = "2025-11-02T21:39:59.308Z" }, -] - -[[package]] -name = "bitstring" -version = "4.3.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "bitarray" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/15/a8/a80c890db75d5bdd5314b5de02c4144c7de94fd0cefcae51acaeb14c6a3f/bitstring-4.3.1.tar.gz", hash = "sha256:a08bc09d3857216d4c0f412a1611056f1cc2b64fd254fb1e8a0afba7cfa1a95a", size = 251426, upload-time = "2025-03-22T09:39:06.978Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/75/2d/174566b533755ddf8efb32a5503af61c756a983de379f8ad3aed6a982d38/bitstring-4.3.1-py3-none-any.whl", hash = "sha256:69d1587f0ac18dc7d93fc7e80d5f447161a33e57027e726dc18a0a8bacf1711a", size = 71930, upload-time = "2025-03-22T09:39:05.163Z" }, -] - -[[package]] -name = "blinker" -version = "1.9.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" }, -] - [[package]] name = "bracex" version = "2.6" @@ -477,44 +295,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/2a/9186535ce58db529927f6cf5990a849aa9e052eea3e2cfefe20b9e1802da/bracex-2.6-py3-none-any.whl", hash = "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952", size = 11508, upload-time = "2025-06-22T19:12:29.781Z" }, ] -[[package]] -name = "brotli" -version = "1.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/c2/f9e977608bdf958650638c3f1e28f85a1b075f075ebbe77db8555463787b/Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724", size = 7372270, upload-time = "2023-09-07T14:05:41.643Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/d0/5373ae13b93fe00095a58efcbce837fd470ca39f703a235d2a999baadfbc/Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28", size = 815693, upload-time = "2024-10-18T12:32:23.824Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8e/48/f6e1cdf86751300c288c1459724bfa6917a80e30dbfc326f92cea5d3683a/Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f", size = 422489, upload-time = "2024-10-18T12:32:25.641Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/88/564958cedce636d0f1bed313381dfc4b4e3d3f6015a63dae6146e1b8c65c/Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409", size = 873081, upload-time = "2023-09-07T14:03:57.967Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/79/b7026a8bb65da9a6bb7d14329fd2bd48d2b7f86d7329d5cc8ddc6a90526f/Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2", size = 446244, upload-time = "2023-09-07T14:03:59.319Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e5/18/c18c32ecea41b6c0004e15606e274006366fe19436b6adccc1ae7b2e50c2/Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451", size = 2906505, upload-time = "2023-09-07T14:04:01.327Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/08/c8/69ec0496b1ada7569b62d85893d928e865df29b90736558d6c98c2031208/Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91", size = 2944152, upload-time = "2023-09-07T14:04:03.033Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ab/fb/0517cea182219d6768113a38167ef6d4eb157a033178cc938033a552ed6d/Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408", size = 2919252, upload-time = "2023-09-07T14:04:04.675Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/53/73a3431662e33ae61a5c80b1b9d2d18f58dfa910ae8dd696e57d39f1a2f5/Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0", size = 2845955, upload-time = "2023-09-07T14:04:06.585Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/55/ac/bd280708d9c5ebdbf9de01459e625a3e3803cce0784f47d633562cf40e83/Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc", size = 2914304, upload-time = "2023-09-07T14:04:08.668Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/76/58/5c391b41ecfc4527d2cc3350719b02e87cb424ef8ba2023fb662f9bf743c/Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180", size = 2814452, upload-time = "2023-09-07T14:04:10.736Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/4e/91b8256dfe99c407f174924b65a01f5305e303f486cc7a2e8a5d43c8bec3/Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248", size = 2938751, upload-time = "2023-09-07T14:04:12.875Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/a6/e2a39a5d3b412938362bbbeba5af904092bf3f95b867b4a3eb856104074e/Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966", size = 2933757, upload-time = "2023-09-07T14:04:14.551Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/13/f0/358354786280a509482e0e77c1a5459e439766597d280f28cb097642fc26/Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9", size = 2936146, upload-time = "2024-10-18T12:32:27.257Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/80/f7/daf538c1060d3a88266b80ecc1d1c98b79553b3f117a485653f17070ea2a/Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb", size = 2848055, upload-time = "2024-10-18T12:32:29.376Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ad/cf/0eaa0585c4077d3c2d1edf322d8e97aabf317941d3a72d7b3ad8bce004b0/Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111", size = 3035102, upload-time = "2024-10-18T12:32:31.371Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/63/1c1585b2aa554fe6dbce30f0c18bdbc877fa9a1bf5ff17677d9cca0ac122/Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839", size = 2930029, upload-time = "2024-10-18T12:32:33.293Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5f/3b/4e3fd1893eb3bbfef8e5a80d4508bec17a57bb92d586c85c12d28666bb13/Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0", size = 333276, upload-time = "2023-09-07T14:04:16.49Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3d/d5/942051b45a9e883b5b6e98c041698b1eb2012d25e5948c58d6bf85b1bb43/Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951", size = 357255, upload-time = "2023-09-07T14:04:17.83Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0a/9f/fb37bb8ffc52a8da37b1c03c459a8cd55df7a57bdccd8831d500e994a0ca/Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5", size = 815681, upload-time = "2024-10-18T12:32:34.942Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/b3/dbd332a988586fefb0aa49c779f59f47cae76855c2d00f450364bb574cac/Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8", size = 422475, upload-time = "2024-10-18T12:32:36.485Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bb/80/6aaddc2f63dbcf2d93c2d204e49c11a9ec93a8c7c63261e2b4bd35198283/Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f", size = 2906173, upload-time = "2024-10-18T12:32:37.978Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/1d/e6ca79c96ff5b641df6097d299347507d39a9604bde8915e76bf026d6c77/Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648", size = 2943803, upload-time = "2024-10-18T12:32:39.606Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ac/a3/d98d2472e0130b7dd3acdbb7f390d478123dbf62b7d32bda5c830a96116d/Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0", size = 2918946, upload-time = "2024-10-18T12:32:41.679Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/a5/c69e6d272aee3e1423ed005d8915a7eaa0384c7de503da987f2d224d0721/Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089", size = 2845707, upload-time = "2024-10-18T12:32:43.478Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/9f/4149d38b52725afa39067350696c09526de0125ebfbaab5acc5af28b42ea/Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368", size = 2936231, upload-time = "2024-10-18T12:32:45.224Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/5a/145de884285611838a16bebfdb060c231c52b8f84dfbe52b852a15780386/Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c", size = 2848157, upload-time = "2024-10-18T12:32:46.894Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/ae/408b6bfb8525dadebd3b3dd5b19d631da4f7d46420321db44cd99dcf2f2c/Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284", size = 3035122, upload-time = "2024-10-18T12:32:48.844Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/85/a94e5cfaa0ca449d8f91c3d6f78313ebf919a0dbd55a100c711c6e9655bc/Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7", size = 2930206, upload-time = "2024-10-18T12:32:51.198Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/f0/a61d9262cd01351df22e57ad7c34f66794709acab13f34be2675f45bf89d/Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0", size = 333804, upload-time = "2024-10-18T12:32:52.661Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/c1/ec214e9c94000d1c1974ec67ced1c970c148aa6b8d8373066123fc3dbf06/Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b", size = 358517, upload-time = "2024-10-18T12:32:54.066Z" }, -] - [[package]] name = "cachetools" version = "6.2.4" @@ -524,35 +304,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, ] -[[package]] -name = "capstone" -version = "5.0.3" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/27/45/d811f0f3b345c8882b9179f7e310f222ba6af45f0cc729028cbf35c6ce03/capstone-5.0.3.tar.gz", hash = "sha256:1f15616c0528f5268f2dc0a81708483e605ce71961b02a01a791230b51fe862d", size = 2943858, upload-time = "2024-08-20T15:20:08.044Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cc/40/503cdbc0b38c5b8b5bcad47ae507b70cecbb18365a82470a46f425788061/capstone-5.0.3-py3-none-macosx_10_9_universal2.whl", hash = "sha256:5dc6b5f4d85a2ba2330473529d7b4faccd71470b489fd19a539c20b74f3c9924", size = 2176874, upload-time = "2024-08-20T15:19:44.511Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/3f/89f13db42cd24af1c80831b6ac978a8a42ad860a35cc461ec009c281e8f3/capstone-5.0.3-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:5917b00ddb0b3a665bcb7a623ee08bcaf4ddec783c5d2ca69f72278d6de38d2c", size = 1180331, upload-time = 
"2024-08-20T15:19:46.149Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/36/79/ddc8a36238791e58ec31a13c10887447b56819d934ab00d6e1371a2e94f1/capstone-5.0.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:76658b66a5f0275c942cd4daf35dca7520387689f588f127020c9130d76c4ddf", size = 1192365, upload-time = "2024-08-20T15:19:47.535Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/bc/39d69c65b7d58157d0c3acad9b929a3f0852c84e44b0e25b6fb89a9e1a7d/capstone-5.0.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237fef7e20906bad09286554c7f8a5a744adeec4d658c5e33938bf9241b5f076", size = 2829855, upload-time = "2024-08-20T15:19:49.353Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/24/f4/ae2d072d39664b7c32da9c8877f7d204692565f7c844da10e70c47730191/capstone-5.0.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b7f3ec2346a60475d9d043b437ace5e6688fc9c4d45387476104bd2c231040", size = 2921407, upload-time = "2024-08-20T15:19:51.186Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7f/1b/1b200d7a32de3b2bf61ee0316385aa566d698d20af99d8e2e00e5ec4b144/capstone-5.0.3-py3-none-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f25e23052d5fa5b6709f7111d7f1250131fb5dac878b8249a4637e6ab953129", size = 2941774, upload-time = "2024-08-20T15:19:53.736Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/82/dd45915b0de636ef949b9d56bd529ae52987d74db1c2d2b7feb4dee322f6/capstone-5.0.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:aebb35f0cfb808aac24d2e3c745be43bcedbb863ecb0192d720faa8002983544", size = 2840714, upload-time = "2024-08-20T15:19:59.593Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6b/e0/d3b617dd552a802760d63e29392c217266505f531c799f4b4cf3e9906eef/capstone-5.0.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:8d45fdf4ae11197234eedbd81742bd4650924f9bb3abee0b238ea63dad8001cc", size = 2937106, upload-time = "2024-08-20T15:20:02.066Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/00/88/3459e1124a7c1bdc02fa2870ffeecdd0fa2be2b4b3382732cd3e3b2e92f2/capstone-5.0.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:63e8544d7500b889a5feb6f7c9a939723663107dd3360ad18f36a3f6985cfd26", size = 2895850, upload-time = "2024-08-20T15:20:04.166Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/35/29/f751bb52b73b0a2e6414ad4e9271fa0cba966e747c1f3351a9c46e9f1ca7/capstone-5.0.3-py3-none-win_amd64.whl", hash = "sha256:21d00dce9dc204a22126fe6f45a8894d03a6607182bc86f14fc9bd1d1d3897ed", size = 1270430, upload-time = "2024-08-20T15:20:06.092Z" }, -] - -[[package]] -name = "cart" -version = "1.2.3" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "pycryptodome" }, -] -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/38/9180e61177d57840a6431784f26a291dc01d8fa32f3a58087919e27c95cd/cart-1.2.3-py2.py3-none-any.whl", hash = "sha256:32a1f16a5b521e077a540b98942e74d3ae04fba58be3c658f7b7c14dcef1a53f", size = 10449, upload-time = "2025-02-10T17:14:42.402Z" }, -] - [[package]] name = "certifi" version = "2025.11.12" @@ -656,42 +407,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = 
"sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, ] -[[package]] -name = "claripy" -version = "9.2.191" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "cachetools" }, - { name = "typing-extensions" }, - { name = "z3-solver" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/e3/0eb13ac36149adf962a8e87f96a2e6aaa73a26486375c84d0755361adf4e/claripy-9.2.191.tar.gz", hash = "sha256:34151964784e884f6f1474be62e7e263f9952bb5b55fc53c6174cd1a0d551252", size = 147144, upload-time = "2025-12-30T17:44:28.253Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6c/00/d8e47b281dc75c0ac6e1cfbe32913e4d3bbd617f494dd080be2502a5bbbf/claripy-9.2.191-py3-none-any.whl", hash = "sha256:159d98d761c900f605e4491ac92681161347fe7b2c901d75da7db9d5d2246d63", size = 141513, upload-time = "2025-12-30T17:43:30.493Z" }, -] - -[[package]] -name = "cle" -version = "9.2.191" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "archinfo" }, - { name = "arpy" }, - { name = "cart" }, - { name = "minidump" }, - { name = "pefile" }, - { name = "pyelftools" }, - { name = "pyvex" }, - { name = "pyxbe" }, - { name = "pyxdia" }, - { name = "sortedcontainers" }, - { name = "uefi-firmware" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/08/114d1375d30da242fb37d07cd9bbd7845219ce68f68f9bd9045122981bed/cle-9.2.191.tar.gz", hash = "sha256:f0cf9b46b663292c944f8a0bc9d61cc88b0c31a52381d839f11ea16dec0195ee", size = 199791, upload-time = "2025-12-30T17:44:29.553Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1f/b2/0469ca9b121f71d64e43b1dcb348099dcc71f6061ab87f22314f02b00cf2/cle-9.2.191-py3-none-any.whl", hash = "sha256:d82a46a102b162b803e0a07fd00ea725335f03c2773b4b4ba6c6f5c8f4832751", size = 204019, upload-time = "2025-12-30T17:43:31.745Z" }, -] - [[package]] name = "click" version = "8.3.1" @@ -805,15 +520,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c9/ad/51f212198681ea7b0deaaf8846ee10af99fba4e894f67b353524eab2bbe5/cryptography-44.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334", size = 3210375, upload-time = "2025-05-02T19:35:35.369Z" }, ] -[[package]] -name = "cxxheaderparser" -version = "1.6.2" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/71/88/bcb6400315da0df5f18197a285863d7da99bfec02a46c91df95a225df18f/cxxheaderparser-1.6.2.tar.gz", hash = "sha256:4574f09908609a7bf5d93841fae6e677b803e9708f564ae599382c1f0e18c7ed", size = 52562, upload-time = "2025-12-16T05:02:14.048Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/98/3b/ef8c9839b958b853f9a081de7a2a8c48c4f73b00269cf1048e009727c032/cxxheaderparser-1.6.2-py3-none-any.whl", hash = "sha256:92aae2b45cb96e4bb5b4be392cca73afef439f06d612c5da3a9cab6d7a13e4ae", size = 58935, upload-time = "2025-12-16T05:02:12.433Z" }, -] - [[package]] name = "cyclopts" version = "4.4.3" @@ -1117,22 +823,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = 
"sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" }, ] -[[package]] -name = "flask" -version = "3.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "blinker" }, - { name = "click" }, - { name = "itsdangerous" }, - { name = "jinja2" }, - { name = "werkzeug" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/89/50/dff6380f1c7f84135484e176e0cac8690af72fa90e932ad2a0a60e28c69b/flask-3.1.0.tar.gz", hash = "sha256:5f873c5184c897c8d9d1b05df1e3d01b14910ce69607a117bd3277098a5836ac", size = 680824, upload-time = "2024-11-13T18:24:38.127Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/47/93213ee66ef8fae3b93b3e29206f6b251e65c97bd91d8e1c5596ef15af0a/flask-3.1.0-py3-none-any.whl", hash = "sha256:d667207822eb83f1c4b50949b1623c8fc8d51f2341d65f72e1a1815397551136", size = 102979, upload-time = "2024-11-13T18:24:36.135Z" }, -] - [[package]] name = "frozenlist" version = "1.8.0" @@ -1190,39 +880,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, ] -[[package]] -name = "future" -version = "1.0.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a7/b2/4140c69c6a66432916b26158687e821ba631a4c9273c474343badf84d3ba/future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05", size = 1228490, upload-time = "2024-02-21T11:52:38.461Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/71/ae30dadffc90b9006d77af76b393cb9dfbfc9629f339fc1574a1c52e6806/future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", size = 491326, upload-time = "2024-02-21T11:52:35.956Z" }, -] - -[[package]] -name = "gitdb" -version = "4.0.12" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "smmap" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" }, -] - -[[package]] -name = "gitpython" -version = "3.1.45" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "gitdb" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" } -wheels = [ - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" }, -] - [[package]] name = "google-auth" version = "2.45.0" @@ -1464,15 +1121,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] -[[package]] -name = "itsdangerous" -version = "2.2.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410, upload-time = "2024-04-16T21:28:15.614Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234, upload-time = "2024-04-16T21:28:14.499Z" }, -] - [[package]] name = "jaraco-classes" version = "3.4.0" @@ -1579,10 +1227,8 @@ dependencies = [ { name = "aiohttp" }, { name = "aiosmtplib" }, { name = "alembic" }, - { name = "angr" }, { name = "asyncpg" }, { name = "bcrypt" }, - { name = "beautifulsoup4" }, { name = "deepagents" }, { name = "docker" }, { name = "email-validator" }, @@ -1602,7 +1248,8 @@ dependencies = [ { name = "loguru" }, { name = "markdownify" }, { name = "mcp" }, - { name = "mitmproxy" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, { name = "orjson" }, { name = "passlib", extra = ["bcrypt"] }, { name = "psutil" }, @@ -1618,7 +1265,6 @@ dependencies = [ { name = "redis" }, { name = "requests" }, { name = "rich" }, - { name = "selenium" }, { name = "semver" }, { name = "sqlalchemy" }, { name = "sse-starlette" }, @@ -1645,6 +1291,7 @@ dev = [ dev = [ { name = "mypy" }, { name = "pre-commit" }, + { name = "pyright" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "ruff" }, @@ -1658,10 +1305,8 @@ requires-dist = [ { name = "aiohttp", specifier = ">=3.13.2" }, { name = "aiosmtplib", specifier = ">=3.0.0" }, { name = "alembic", specifier = ">=1.14.0" }, - { name = "angr", specifier = ">=9.2.182" }, { name = "asyncpg", specifier = ">=0.30.0" }, { name = "bcrypt", specifier = ">=5.0.0" }, - { name = "beautifulsoup4", specifier = ">=4.14.2" }, { name = "deepagents", specifier = ">=0.4.0" }, { name = "docker", specifier = ">=7.1.0" }, { name = "email-validator", specifier = ">=2.0.0" }, @@ -1682,8 +1327,9 @@ requires-dist = [ { name = "loguru", specifier = ">=0.7.3" }, { name = "markdownify", specifier = ">=0.11.6" }, { name = "mcp", specifier = ">=1.20.0" }, - { name = "mitmproxy", specifier = ">=11.1.3" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.13.0" }, + { name = "opentelemetry-api", specifier = ">=1.25.0" }, + { name = "opentelemetry-sdk", specifier = ">=1.25.0" }, { name = "orjson", specifier = ">=3.10.0" }, { name = "passlib", extras = ["bcrypt"], specifier = ">=1.7.4" }, { name = "pre-commit", marker = "extra == 'dev'", 
specifier = ">=4.0.0" }, @@ -1704,7 +1350,6 @@ requires-dist = [ { name = "requests", specifier = ">=2.32.5" }, { name = "rich", specifier = ">=13.7.0" }, { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.8.0" }, - { name = "selenium", specifier = ">=4.38.0" }, { name = "semver", specifier = ">=3.0.4" }, { name = "sqlalchemy", specifier = ">=2.0.44" }, { name = "sse-starlette", specifier = ">=2.0.0" }, @@ -1721,6 +1366,7 @@ provides-extras = ["dev"] dev = [ { name = "mypy", specifier = ">=1.13.0" }, { name = "pre-commit", specifier = ">=4.0.0" }, + { name = "pyright", specifier = ">=1.1.409" }, { name = "pytest", specifier = ">=8.0.0" }, { name = "pytest-asyncio", specifier = ">=0.24.0" }, { name = "ruff", specifier = ">=0.8.0" }, @@ -1791,15 +1437,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, ] -[[package]] -name = "kaitaistruct" -version = "0.10" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/04/dd60b9cb65d580ef6cb6eaee975ad1bdd22d46a3f51b07a1e0606710ea88/kaitaistruct-0.10.tar.gz", hash = "sha256:a044dee29173d6afbacf27bcac39daf89b654dd418cfa009ab82d9178a9ae52a", size = 7061, upload-time = "2022-07-09T00:34:06.729Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4e/bf/88ad23efc08708bda9a2647169828e3553bb2093a473801db61f75356395/kaitaistruct-0.10-py2.py3-none-any.whl", hash = "sha256:a97350919adbf37fda881f75e9365e2fb88d04832b7a4e57106ec70119efb235", size = 7013, upload-time = "2022-07-09T00:34:03.905Z" }, -] - [[package]] name = "keyring" version = "25.7.0" @@ -2018,18 +1655,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/d8/91a8b483b30e0708a8911df10b4ce04ebf2b4b8dde8d020c124aec77380a/langsmith-0.5.2-py3-none-any.whl", hash = "sha256:42f8b853a18dd4d5f7fa38c8ff29e38da065a727022da410d91b3e13819aacc1", size = 283311, upload-time = "2025-12-30T13:41:33.915Z" }, ] -[[package]] -name = "ldap3" -version = "2.9.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "pyasn1" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/43/ac/96bd5464e3edbc61595d0d69989f5d9969ae411866427b2500a8e5b812c0/ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f", size = 398830, upload-time = "2021-07-18T06:34:21.786Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4e/f6/71d6ec9f18da0b2201287ce9db6afb1a1f637dedb3f0703409558981c723/ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70", size = 432192, upload-time = "2021-07-18T06:34:12.905Z" }, -] - [[package]] name = "librt" version = "0.7.5" @@ -2215,96 +1840,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, ] -[[package]] -name = "minidump" -version = "0.0.24" -source = { registry = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/26/4b/bc695b99dc7d77d28223765c3ee5a31d34fd2850c52eb683ccdd1206067d/minidump-0.0.24.tar.gz", hash = "sha256:f7ae09b944f3b17ccf5cecc66f9ff5a7a45b053474a13aeb012f4c9204470437", size = 60709, upload-time = "2024-08-15T14:54:22.728Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/8b/3a148750e6d65f4fe6e249f6d0d6c497eeadd867a63ed69cee3fa17e7c83/minidump-0.0.24-py3-none-any.whl", hash = "sha256:9c016e35c8fe37c82a01b0a266f5416a0b0138934d92affb436ac2e72372bec6", size = 78365, upload-time = "2024-08-15T14:54:21.124Z" }, -] - -[[package]] -name = "mitmproxy" -version = "11.1.3" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "aioquic" }, - { name = "argon2-cffi" }, - { name = "asgiref" }, - { name = "brotli" }, - { name = "certifi" }, - { name = "cryptography" }, - { name = "flask" }, - { name = "h11" }, - { name = "h2" }, - { name = "hyperframe" }, - { name = "kaitaistruct" }, - { name = "ldap3" }, - { name = "mitmproxy-rs" }, - { name = "msgpack" }, - { name = "passlib" }, - { name = "publicsuffix2" }, - { name = "pydivert", marker = "sys_platform == 'win32'" }, - { name = "pyopenssl" }, - { name = "pyparsing" }, - { name = "pyperclip" }, - { name = "ruamel-yaml" }, - { name = "sortedcontainers" }, - { name = "tornado" }, - { name = "urwid" }, - { name = "wsproto" }, - { name = "zstandard" }, -] -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/14/07/0a8528ea8d4e08a7cf19bf41158d3fcd0baad3686059ae54ee2d647d81db/mitmproxy-11.1.3-py3-none-any.whl", hash = "sha256:2305880b46465d1a9bdcdac369655826f588d05f382b082249a3e532a0e52952", size = 1662554, upload-time = "2025-02-17T12:10:28.138Z" }, -] - -[[package]] -name = "mitmproxy-linux" -version = "0.11.5" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/81/aebf603418ad01c70d2944e45f646889693cedd89c2993a2c4e3dc975b07/mitmproxy_linux-0.11.5.tar.gz", hash = "sha256:ee3782fe4e7ccc6a899fa0ef5ad3e35a3ec358587304bd4d212188d2462c8f82", size = 1285776, upload-time = "2025-02-17T11:54:42.132Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/81/ed/f864f39e733f6ecaaddf894c0f295983a6b5b09055d00a659eb08001b0d1/mitmproxy_linux-0.11.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7ce0b91d7a510009d532e6abbebe59f027a011fa745b13faa5b4d9ebe92abf5", size = 962015, upload-time = "2025-02-17T11:54:24.592Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/0c/5cc04ac3b7bb21b464d1109745ddfbdefc478ca0501b6cb5f7a91edd8516/mitmproxy_linux-0.11.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6a31faf244a9e3d44db02e3e3301aa2e699da67188820982a93028884f4cba8", size = 1040306, upload-time = "2025-02-17T11:54:26.199Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/25/f8/25d0483cd26fd6488c7fc16f0f8797ec19104863bff6bb3ee7dc56995b69/mitmproxy_linux-0.11.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544be1db84575fc8ecc71fb566032cabe4a65a4891d5bd0dc688e3023b49a18a", size = 962015, upload-time = "2025-02-17T11:54:28.547Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/80/12/6a9f189f7aa0b8dfb1c2017b41f2fdb43d64b32e30d1f5fa7e6aeb69c218/mitmproxy_linux-0.11.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00a40d08a1522d5718e9ff87458a950f06f62e5374d154d851122c0eb41c5dc0", size = 1040306, upload-time = "2025-02-17T11:54:30.611Z" }, -] - -[[package]] -name = "mitmproxy-macos" -version = "0.11.5" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/e5/060bb75c98120f5a2fc0cde20f376cc947e5b8474cb2d8ebabade69fbf8f/mitmproxy_macos-0.11.5-py3-none-any.whl", hash = "sha256:8f7aaa646acc64ba4790a7f4d46cb9fbfd7cb0411b9b7a567db0404864bff28d", size = 2658276, upload-time = "2025-02-17T11:54:31.833Z" }, -] - -[[package]] -name = "mitmproxy-rs" -version = "0.11.5" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "mitmproxy-linux", marker = "sys_platform == 'linux'" }, - { name = "mitmproxy-macos", marker = "sys_platform == 'darwin'" }, - { name = "mitmproxy-windows", marker = "os_name == 'nt'" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/fc/a944a0efa89606efde1f8d8acfd763b69b8d13d5d84d8f8ea79939682204/mitmproxy_rs-0.11.5.tar.gz", hash = "sha256:05f0da03165c2ee2803f91e6648bc9409692f42d796cbaf3fec5a20754ca8c39", size = 1296760, upload-time = "2025-02-17T11:54:43.933Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/63/18/91a37552505b5e1baea555425f8ab30694cf6e16a34e2a528e0ae70ca6b1/mitmproxy_rs-0.11.5-cp310-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2f668dc92573cc3c3ba8c75b166276d846ce7321daf37f4a68bd837538298c5c", size = 3811905, upload-time = "2025-02-17T11:54:34.21Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/97/a2/aa81e54a27572b4d9503e79e9999019fdf4c1e1f2a7b8a083a7fa01f7bd6/mitmproxy_rs-0.11.5-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971241cb70bad87b55f12bc6e8d7dd3efd02a1acbe1730703e2cfeeb6edd3908", size = 1512445, upload-time = "2025-02-17T11:54:35.766Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/29/34/430966c7a5dc998dec4e9f73d5628b2ccadaf73c26697020ad87e5183e16/mitmproxy_rs-0.11.5-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a4ffe6d20b3a0edb47b40cd60e7b62709c29e8adf2573514cc0abd1442acf63", size = 1605733, upload-time = "2025-02-17T11:54:37Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/13/a7/43999d162b44b5848c0d663790027711927bded4b506a01f3f36d386d57f/mitmproxy_rs-0.11.5-cp310-abi3-win_amd64.whl", hash = "sha256:5353ad0c828aaa37ac53511f3960e39c0888848565f5faa3ea09e205ed8a7350", size = 1539652, upload-time = "2025-02-17T11:54:38.531Z" }, -] - -[[package]] -name = "mitmproxy-windows" -version = "0.11.5" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7f/4e/65804c55c0457e87c33e94b3c92421e4519337dd17a747795ef9c507da95/mitmproxy_windows-0.11.5-py3-none-any.whl", hash = "sha256:76035ddf3067b07a2200e286a9fdb3d447cd4a9755dca1d5cb06935947b52592", size = 480403, upload-time = "2025-02-17T11:54:40.204Z" }, -] - [[package]] name = "more-itertools" version = "10.8.0" @@ -2314,75 +1849,6 @@ wheels = [ { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a4/8e/469e5a4a2f5855992e425f3cb33804cc07bf18d48f2db061aec61ce50270/more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b", size = 69667, upload-time = "2025-09-02T15:23:09.635Z" }, ] -[[package]] -name = "mpmath" -version = "1.3.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, -] - -[[package]] -name = "msgpack" -version = "1.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/d0/7555686ae7ff5731205df1012ede15dd9d927f6227ea151e901c7406af4f/msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e", size = 167260, upload-time = "2024-09-10T04:25:52.197Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e1/d6/716b7ca1dbde63290d2973d22bbef1b5032ca634c3ff4384a958ec3f093a/msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d", size = 152421, upload-time = "2024-09-10T04:25:49.63Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/da/5312b067f6773429cec2f8f08b021c06af416bba340c912c2ec778539ed6/msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2", size = 85277, upload-time = "2024-09-10T04:24:48.562Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/51/da7f3ae4462e8bb98af0d5bdf2707f1b8c65a0d4f496e46b6afb06cbc286/msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420", size = 82222, upload-time = "2024-09-10T04:25:36.49Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/33/af/dc95c4b2a49cff17ce47611ca9ba218198806cad7796c0b01d1e332c86bb/msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2", size = 392971, upload-time = "2024-09-10T04:24:58.129Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f1/54/65af8de681fa8255402c80eda2a501ba467921d5a7a028c9c22a2c2eedb5/msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39", size = 401403, upload-time = "2024-09-10T04:25:40.428Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/97/8c/e333690777bd33919ab7024269dc3c41c76ef5137b211d776fbb404bfead/msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f", size = 385356, upload-time = 
"2024-09-10T04:25:31.406Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/52/406795ba478dc1c890559dd4e89280fa86506608a28ccf3a72fbf45df9f5/msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247", size = 383028, upload-time = "2024-09-10T04:25:17.08Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e7/69/053b6549bf90a3acadcd8232eae03e2fefc87f066a5b9fbb37e2e608859f/msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c", size = 391100, upload-time = "2024-09-10T04:25:08.993Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/23/f0/d4101d4da054f04274995ddc4086c2715d9b93111eb9ed49686c0f7ccc8a/msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b", size = 394254, upload-time = "2024-09-10T04:25:06.048Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1c/12/cf07458f35d0d775ff3a2dc5559fa2e1fcd06c46f1ef510e594ebefdca01/msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b", size = 69085, upload-time = "2024-09-10T04:25:01.494Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/73/80/2708a4641f7d553a63bc934a3eb7214806b5b39d200133ca7f7afb0a53e8/msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f", size = 75347, upload-time = "2024-09-10T04:25:33.106Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c8/b0/380f5f639543a4ac413e969109978feb1f3c66e931068f91ab6ab0f8be00/msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf", size = 151142, upload-time = "2024-09-10T04:24:59.656Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c8/ee/be57e9702400a6cb2606883d55b05784fada898dfc7fd12608ab1fdb054e/msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330", size = 84523, upload-time = "2024-09-10T04:25:37.924Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/3a/2919f63acca3c119565449681ad08a2f84b2171ddfcff1dba6959db2cceb/msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734", size = 81556, upload-time = "2024-09-10T04:24:28.296Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7c/43/a11113d9e5c1498c145a8925768ea2d5fce7cbab15c99cda655aa09947ed/msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e", size = 392105, upload-time = "2024-09-10T04:25:20.153Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2d/7b/2c1d74ca6c94f70a1add74a8393a0138172207dc5de6fc6269483519d048/msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca", size = 399979, upload-time = "2024-09-10T04:25:41.75Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/82/8c/cf64ae518c7b8efc763ca1f1348a96f0e37150061e777a8ea5430b413a74/msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915", size = 383816, upload-time = "2024-09-10T04:24:45.826Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/69/86/a847ef7a0f5ef3fa94ae20f52a4cacf596a4e4a010197fbcc27744eb9a83/msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d", size = 380973, upload-time = "2024-09-10T04:25:04.689Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/90/c74cf6e1126faa93185d3b830ee97246ecc4fe12cf9d2d31318ee4246994/msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434", size = 387435, upload-time = "2024-09-10T04:24:17.879Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7a/40/631c238f1f338eb09f4acb0f34ab5862c4e9d7eda11c1b685471a4c5ea37/msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c", size = 399082, upload-time = "2024-09-10T04:25:18.398Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e9/1b/fa8a952be252a1555ed39f97c06778e3aeb9123aa4cccc0fd2acd0b4e315/msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc", size = 69037, upload-time = "2024-09-10T04:24:52.798Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b6/bc/8bd826dd03e022153bfa1766dcdec4976d6c818865ed54223d71f07862b3/msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f", size = 75140, upload-time = "2024-09-10T04:24:31.288Z" }, -] - -[[package]] -name = "msgspec" -version = "0.20.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/9c/bfbd12955a49180cbd234c5d29ec6f74fe641698f0cd9df154a854fc8a15/msgspec-0.20.0.tar.gz", hash = "sha256:692349e588fde322875f8d3025ac01689fead5901e7fb18d6870a44519d62a29", size = 317862, upload-time = "2025-11-24T03:56:28.934Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d9/6f/1e25eee957e58e3afb2a44b94fa95e06cebc4c236193ed0de3012fff1e19/msgspec-0.20.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2aba22e2e302e9231e85edc24f27ba1f524d43c223ef5765bd8624c7df9ec0a5", size = 196391, upload-time = "2025-11-24T03:55:32.677Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7f/ee/af51d090ada641d4b264992a486435ba3ef5b5634bc27e6eb002f71cef7d/msgspec-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:716284f898ab2547fedd72a93bb940375de9fbfe77538f05779632dc34afdfde", size = 188644, upload-time = "2025-11-24T03:55:33.934Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/49/d6/9709ee093b7742362c2934bfb1bbe791a1e09bed3ea5d8a18ce552fbfd73/msgspec-0.20.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:558ed73315efa51b1538fa8f1d3b22c8c5ff6d9a2a62eff87d25829b94fc5054", size = 218852, upload-time = "2025-11-24T03:55:35.575Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/a2/488517a43ccf5a4b6b6eca6dd4ede0bd82b043d1539dd6bb908a19f8efd3/msgspec-0.20.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:509ac1362a1d53aa66798c9b9fd76872d7faa30fcf89b2fba3bcbfd559d56eb0", size = 224937, 
upload-time = "2025-11-24T03:55:36.859Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d5/e8/49b832808aa23b85d4f090d1d2e48a4e3834871415031ed7c5fe48723156/msgspec-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1353c2c93423602e7dea1aa4c92f3391fdfc25ff40e0bacf81d34dbc68adb870", size = 222858, upload-time = "2025-11-24T03:55:38.187Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9f/56/1dc2fa53685dca9c3f243a6cbecd34e856858354e455b77f47ebd76cf5bf/msgspec-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cb33b5eb5adb3c33d749684471c6a165468395d7aa02d8867c15103b81e1da3e", size = 227248, upload-time = "2025-11-24T03:55:39.496Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/51/aba940212c23b32eedce752896205912c2668472ed5b205fc33da28a6509/msgspec-0.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:fb1d934e435dd3a2b8cf4bbf47a8757100b4a1cfdc2afdf227541199885cdacb", size = 190024, upload-time = "2025-11-24T03:55:40.829Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/41/ad/3b9f259d94f183daa9764fef33fdc7010f7ecffc29af977044fa47440a83/msgspec-0.20.0-cp312-cp312-win_arm64.whl", hash = "sha256:00648b1e19cf01b2be45444ba9dc961bd4c056ffb15706651e64e5d6ec6197b7", size = 175390, upload-time = "2025-11-24T03:55:42.05Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8a/d1/b902d38b6e5ba3bdddbec469bba388d647f960aeed7b5b3623a8debe8a76/msgspec-0.20.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9c1ff8db03be7598b50dd4b4a478d6fe93faae3bd54f4f17aa004d0e46c14c46", size = 196463, upload-time = "2025-11-24T03:55:43.405Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/b6/eff0305961a1d9447ec2b02f8c73c8946f22564d302a504185b730c9a761/msgspec-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f6532369ece217fd37c5ebcfd7e981f2615628c21121b7b2df9d3adcf2fd69b8", size = 188650, upload-time = "2025-11-24T03:55:44.761Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/99/93/f2ec1ae1de51d3fdee998a1ede6b2c089453a2ee82b5c1b361ed9095064a/msgspec-0.20.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9a1697da2f85a751ac3cc6a97fceb8e937fc670947183fb2268edaf4016d1ee", size = 218834, upload-time = "2025-11-24T03:55:46.441Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/83/36557b04cfdc317ed8a525c4993b23e43a8fbcddaddd78619112ca07138c/msgspec-0.20.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7fac7e9c92eddcd24c19d9e5f6249760941485dff97802461ae7c995a2450111", size = 224917, upload-time = "2025-11-24T03:55:48.06Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8f/56/362037a1ed5be0b88aced59272442c4b40065c659700f4b195a7f4d0ac88/msgspec-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f953a66f2a3eb8d5ea64768445e2bb301d97609db052628c3e1bcb7d87192a9f", size = 222821, upload-time = "2025-11-24T03:55:49.388Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/75/fa2370ec341cedf663731ab7042e177b3742645c5dd4f64dc96bd9f18a6b/msgspec-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:247af0313ae64a066d3aea7ba98840f6681ccbf5c90ba9c7d17f3e39dbba679c", size = 227227, upload-time = "2025-11-24T03:55:51.125Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f1/25/5e8080fe0117f799b1b68008dc29a65862077296b92550632de015128579/msgspec-0.20.0-cp313-cp313-win_amd64.whl", hash = "sha256:67d5e4dfad52832017018d30a462604c80561aa62a9d548fc2bd4e430b66a352", size = 189966, upload-time = "2025-11-24T03:55:52.458Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/79/b6/63363422153937d40e1cb349c5081338401f8529a5a4e216865decd981bf/msgspec-0.20.0-cp313-cp313-win_arm64.whl", hash = "sha256:91a52578226708b63a9a13de287b1ec3ed1123e4a088b198143860c087770458", size = 175378, upload-time = "2025-11-24T03:55:53.721Z" }, -] - -[[package]] -name = "mulpyplexer" -version = "0.9" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d7/6f/f036d4fb9f5a511262345bb18215c70fa69a250aca2e5ffe1a9c7e4cb85c/mulpyplexer-0.09.tar.gz", hash = "sha256:144e9e9bf66d3988f60542c9d3d4c94857438f7908f60e53f4c1cb1622fbbd30", size = 2846, upload-time = "2021-01-11T21:10:21.68Z" } - [[package]] name = "multidict" version = "6.7.0" @@ -2482,15 +1948,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, ] -[[package]] -name = "networkx" -version = "3.6.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/51/63fe664f3908c97be9d2e4f1158eb633317598cfa6e1fc14af5383f17512/networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509", size = 2517025, upload-time = "2025-12-08T17:02:39.908Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" }, -] - [[package]] name = "nodeenv" version = "1.10.0" @@ -2500,46 +1957,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] -[[package]] -name = "numpy" -version = "2.4.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a7/0d/853fd96372eda07c824d24adf02e8bc92bb3731b43a9b2a39161c3667cc4/numpy-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a152d86a3ae00ba5f47b3acf3b827509fd0b6cb7d3259665e63dafbad22a75ea", size = 16649088, upload-time = 
"2025-12-20T16:16:31.421Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e3/37/cc636f1f2a9f585434e20a3e6e63422f70bfe4f7f6698e941db52ea1ac9a/numpy-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:39b19251dec4de8ff8496cd0806cbe27bf0684f765abb1f4809554de93785f2d", size = 12364065, upload-time = "2025-12-20T16:16:33.491Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/69/0b78f37ca3690969beee54103ce5f6021709134e8020767e93ba691a72f1/numpy-2.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:009bd0ea12d3c784b6639a8457537016ce5172109e585338e11334f6a7bb88ee", size = 5192640, upload-time = "2025-12-20T16:16:35.636Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1d/2a/08569f8252abf590294dbb09a430543ec8f8cc710383abfb3e75cc73aeda/numpy-2.4.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5fe44e277225fd3dff6882d86d3d447205d43532c3627313d17e754fb3905a0e", size = 6541556, upload-time = "2025-12-20T16:16:37.276Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/93/e9/a949885a4e177493d61519377952186b6cbfdf1d6002764c664ba28349b5/numpy-2.4.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f935c4493eda9069851058fa0d9e39dbf6286be690066509305e52912714dbb2", size = 14396562, upload-time = "2025-12-20T16:16:38.953Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/99/98/9d4ad53b0e9ef901c2ef1d550d2136f5ac42d3fd2988390a6def32e23e48/numpy-2.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cfa5f29a695cb7438965e6c3e8d06e0416060cf0d709c1b1c1653a939bf5c2a", size = 16351719, upload-time = "2025-12-20T16:16:41.503Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/de/5f3711a38341d6e8dd619f6353251a0cdd07f3d6d101a8fd46f4ef87f895/numpy-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba0cb30acd3ef11c94dc27fbfba68940652492bc107075e7ffe23057f9425681", size = 16176053, upload-time = "2025-12-20T16:16:44.552Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/5b/2a3753dc43916501b4183532e7ace862e13211042bceafa253afb5c71272/numpy-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60e8c196cd82cbbd4f130b5290007e13e6de3eca79f0d4d38014769d96a7c475", size = 18277859, upload-time = "2025-12-20T16:16:47.174Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/c5/a18bcdd07a941db3076ef489d036ab16d2bfc2eae0cf27e5a26e29189434/numpy-2.4.0-cp313-cp313-win32.whl", hash = "sha256:5f48cb3e88fbc294dc90e215d86fbaf1c852c63dbdb6c3a3e63f45c4b57f7344", size = 5953849, upload-time = "2025-12-20T16:16:49.554Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/f1/719010ff8061da6e8a26e1980cf090412d4f5f8060b31f0c45d77dd67a01/numpy-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:a899699294f28f7be8992853c0c60741f16ff199205e2e6cdca155762cbaa59d", size = 12302840, upload-time = "2025-12-20T16:16:51.227Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f5/5a/b3d259083ed8b4d335270c76966cb6cf14a5d1b69e1a608994ac57a659e6/numpy-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:9198f447e1dc5647d07c9a6bbe2063cc0132728cc7175b39dbc796da5b54920d", size = 10308509, upload-time = "2025-12-20T16:16:53.313Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/31/01/95edcffd1bb6c0633df4e808130545c4f07383ab629ac7e316fb44fff677/numpy-2.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:74623f2ab5cc3f7c886add4f735d1031a1d2be4a4ae63c0546cfd74e7a31ddf6", size = 12491815, upload-time = "2025-12-20T16:16:55.496Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/59/ea/5644b8baa92cc1c7163b4b4458c8679852733fa74ca49c942cfa82ded4e0/numpy-2.4.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0804a8e4ab070d1d35496e65ffd3cf8114c136a2b81f61dfab0de4b218aacfd5", size = 5320321, upload-time = "2025-12-20T16:16:57.468Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/26/4e/e10938106d70bc21319bd6a86ae726da37edc802ce35a3a71ecdf1fdfe7f/numpy-2.4.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:02a2038eb27f9443a8b266a66911e926566b5a6ffd1a689b588f7f35b81e7dc3", size = 6641635, upload-time = "2025-12-20T16:16:59.379Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/8d/a8828e3eaf5c0b4ab116924df82f24ce3416fa38d0674d8f708ddc6c8aac/numpy-2.4.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1889b3a3f47a7b5bee16bc25a2145bd7cb91897f815ce3499db64c7458b6d91d", size = 14456053, upload-time = "2025-12-20T16:17:01.768Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/68/a1/17d97609d87d4520aa5ae2dcfb32305654550ac6a35effb946d303e594ce/numpy-2.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85eef4cb5625c47ee6425c58a3502555e10f45ee973da878ac8248ad58c136f3", size = 16401702, upload-time = "2025-12-20T16:17:04.235Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/32/0f13c1b2d22bea1118356b8b963195446f3af124ed7a5adfa8fdecb1b6ca/numpy-2.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6dc8b7e2f4eb184b37655195f421836cfae6f58197b67e3ffc501f1333d993fa", size = 16242493, upload-time = "2025-12-20T16:17:06.856Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ae/23/48f21e3d309fbc137c068a1475358cbd3a901b3987dcfc97a029ab3068e2/numpy-2.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:44aba2f0cafd287871a495fb3163408b0bd25bbce135c6f621534a07f4f7875c", size = 18324222, upload-time = "2025-12-20T16:17:09.392Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ac/52/41f3d71296a3dcaa4f456aaa3c6fc8e745b43d0552b6bde56571bb4b4a0f/numpy-2.4.0-cp313-cp313t-win32.whl", hash = "sha256:20c115517513831860c573996e395707aa9fb691eb179200125c250e895fcd93", size = 6076216, upload-time = "2025-12-20T16:17:11.437Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/35/ff/46fbfe60ab0710d2a2b16995f708750307d30eccbb4c38371ea9e986866e/numpy-2.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b48e35f4ab6f6a7597c46e301126ceba4c44cd3280e3750f85db48b082624fa4", size = 12444263, upload-time = "2025-12-20T16:17:13.182Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a3/e3/9189ab319c01d2ed556c932ccf55064c5d75bb5850d1df7a482ce0badead/numpy-2.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:4d1cfce39e511069b11e67cd0bd78ceff31443b7c9e5c04db73c7a19f572967c", size = 10378265, upload-time = "2025-12-20T16:17:15.211Z" }, -] - [[package]] name = "openai" version = "2.14.0" @@ -2746,18 +2163,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/64/bfa5f4a34d0f15c6aba1b73e73f7441a66d635bd03249d334a4796b7a924/ormsgpack-1.12.1-cp313-cp313-win_arm64.whl", hash = "sha256:cfa22c91cffc10a7fbd43729baff2de7d9c28cef2509085a704168ae31f02568", size = 109986, upload-time = "2025-12-14T07:57:26.569Z" }, ] -[[package]] -name = "outcome" -version = "1.3.0.post0" 
-source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "attrs" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/98/df/77698abfac98571e65ffeb0c1fba8ffd692ab8458d617a0eed7d9a8d38f2/outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/55/8b/5ab7257531a5d830fc8000c476e63c935488d74609b50f9384a643ec0a62/outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b" }, -] - [[package]] name = "packaging" version = "25.0" @@ -2808,15 +2213,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/70/875f4a23bfc4731703a5835487d0d2fb999031bd415e7d17c0ae615c18b7/pathvalidate-3.3.1-py3-none-any.whl", hash = "sha256:5263baab691f8e1af96092fa5137ee17df5bdfbd6cff1fcac4d6ef4bc2e1735f", size = 24305, upload-time = "2025-06-15T09:07:19.117Z" }, ] -[[package]] -name = "pefile" -version = "2024.8.26" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/03/4f/2750f7f6f025a1507cd3b7218691671eecfd0bbebebe8b39aa0fe1d360b8/pefile-2024.8.26.tar.gz", hash = "sha256:3ff6c5d8b43e8c37bb6e6dd5085658d658a7a0bdcd20b6a07b1fcfc1c4e9d632", size = 76008, upload-time = "2024-08-26T20:58:38.155Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/16/12b82f791c7f50ddec566873d5bdd245baa1491bac11d15ffb98aecc8f8b/pefile-2024.8.26-py3-none-any.whl", hash = "sha256:76f8b485dcd3b1bb8166f1128d395fa3d87af26360c2358fb75b80019b957c6f", size = 74766, upload-time = "2024-08-26T21:01:02.632Z" }, -] - [[package]] name = "platformdirs" version = "4.5.1" @@ -3052,15 +2448,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/80/2d/1bb683f64737bbb1f86c82b7359db1eb2be4e2c0c13b947f80efefa7d3e5/psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa", size = 2714215, upload-time = "2025-10-10T11:13:07.14Z" }, ] -[[package]] -name = "publicsuffix2" -version = "2.20191221" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/04/1759906c4c5b67b2903f546de234a824d4028ef24eb0b1122daa43376c20/publicsuffix2-2.20191221.tar.gz", hash = "sha256:00f8cc31aa8d0d5592a5ced19cccba7de428ebca985db26ac852d920ddd6fe7b", size = 99592, upload-time = "2019-12-21T11:30:44.863Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/16/053c2945c5e3aebeefb4ccd5c5e7639e38bc30ad1bdc7ce86c6d01707726/publicsuffix2-2.20191221-py2.py3-none-any.whl", hash = "sha256:786b5e36205b88758bd3518725ec8cfe7a8173f5269354641f581c6b80a99893", size = 89033, upload-time = "2019-12-21T11:30:41.744Z" }, -] - [[package]] name = "py-key-value-aio" version = "0.3.0" @@ -3132,36 +2519,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, ] -[[package]] -name = "pycryptodome" -version = "3.23.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = 
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8e/a6/8452177684d5e906854776276ddd34eca30d1b1e15aa1ee9cefc289a33f5/pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef", size = 4921276, upload-time = "2025-05-17T17:21:45.242Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/5d/bdb09489b63cd34a976cc9e2a8d938114f7a53a74d3dd4f125ffa49dce82/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:0011f7f00cdb74879142011f95133274741778abba114ceca229adbf8e62c3e4", size = 2495152, upload-time = "2025-05-17T17:20:20.833Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a7/ce/7840250ed4cc0039c433cd41715536f926d6e86ce84e904068eb3244b6a6/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:90460fc9e088ce095f9ee8356722d4f10f86e5be06e2354230a9880b9c549aae", size = 1639348, upload-time = "2025-05-17T17:20:23.171Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ee/f0/991da24c55c1f688d6a3b5a11940567353f74590734ee4a64294834ae472/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4764e64b269fc83b00f682c47443c2e6e85b18273712b98aa43bcb77f8570477", size = 2184033, upload-time = "2025-05-17T17:20:25.424Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/16/0e11882deddf00f68b68dd4e8e442ddc30641f31afeb2bc25588124ac8de/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8f24adb74984aa0e5d07a2368ad95276cf38051fe2dc6605cbcf482e04f2a7", size = 2270142, upload-time = "2025-05-17T17:20:27.808Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d5/fc/4347fea23a3f95ffb931f383ff28b3f7b1fe868739182cb76718c0da86a1/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d97618c9c6684a97ef7637ba43bdf6663a2e2e77efe0f863cce97a76af396446", size = 2309384, upload-time = "2025-05-17T17:20:30.765Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6e/d9/c5261780b69ce66d8cfab25d2797bd6e82ba0241804694cd48be41add5eb/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a53a4fe5cb075075d515797d6ce2f56772ea7e6a1e5e4b96cf78a14bac3d265", size = 2183237, upload-time = "2025-05-17T17:20:33.736Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/6f/3af2ffedd5cfa08c631f89452c6648c4d779e7772dfc388c77c920ca6bbf/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:763d1d74f56f031788e5d307029caef067febf890cd1f8bf61183ae142f1a77b", size = 2343898, upload-time = "2025-05-17T17:20:36.086Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/dc/9060d807039ee5de6e2f260f72f3d70ac213993a804f5e67e0a73a56dd2f/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:954af0e2bd7cea83ce72243b14e4fb518b18f0c1649b576d114973e2073b273d", size = 2269197, upload-time = "2025-05-17T17:20:38.414Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/34/e6c8ca177cb29dcc4967fef73f5de445912f93bd0343c9c33c8e5bf8cde8/pycryptodome-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:257bb3572c63ad8ba40b89f6fc9d63a2a628e9f9708d31ee26560925ebe0210a", size = 1768600, upload-time = "2025-05-17T17:20:40.688Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e4/1d/89756b8d7ff623ad0160f4539da571d1f594d21ee6d68be130a6eccb39a4/pycryptodome-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6501790c5b62a29fcb227bd6b62012181d886a767ce9ed03b303d1f22eb5c625", size = 1799740, upload-time = "2025-05-17T17:20:42.413Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5d/61/35a64f0feaea9fd07f0d91209e7be91726eb48c0f1bfc6720647194071e4/pycryptodome-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9a77627a330ab23ca43b48b130e202582e91cc69619947840ea4d2d1be21eb39", size = 1703685, upload-time = "2025-05-17T17:20:44.388Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/db/6c/a1f71542c969912bb0e106f64f60a56cc1f0fabecf9396f45accbe63fa68/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27", size = 2495627, upload-time = "2025-05-17T17:20:47.139Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6e/4e/a066527e079fc5002390c8acdd3aca431e6ea0a50ffd7201551175b47323/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843", size = 1640362, upload-time = "2025-05-17T17:20:50.392Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/52/adaf4c8c100a8c49d2bd058e5b551f73dfd8cb89eb4911e25a0c469b6b4e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490", size = 2182625, upload-time = "2025-05-17T17:20:52.866Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5f/e9/a09476d436d0ff1402ac3867d933c61805ec2326c6ea557aeeac3825604e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575", size = 2268954, upload-time = "2025-05-17T17:20:55.027Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/c5/ffe6474e0c551d54cab931918127c46d70cab8f114e0c2b5a3c071c2f484/pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b", size = 2308534, upload-time = "2025-05-17T17:20:57.279Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/28/e199677fc15ecf43010f2463fde4c1a53015d1fe95fb03bca2890836603a/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a", size = 2181853, upload-time = "2025-05-17T17:20:59.322Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ce/ea/4fdb09f2165ce1365c9eaefef36625583371ee514db58dc9b65d3a255c4c/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f", size = 2342465, upload-time = "2025-05-17T17:21:03.83Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/82/6edc3fc42fe9284aead511394bac167693fb2b0e0395b28b8bedaa07ef04/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa", size = 2267414, upload-time = "2025-05-17T17:21:06.72Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/59/fe/aae679b64363eb78326c7fdc9d06ec3de18bac68be4b612fc1fe8902693c/pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886", size = 1768484, upload-time = "2025-05-17T17:21:08.535Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/2f/e97a1b8294db0daaa87012c24a7bb714147c7ade7656973fd6c736b484ff/pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2", size = 1799636, upload-time = "2025-05-17T17:21:10.393Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/3d/f9441a0d798bf2b1e645adc3265e55706aead1255ccdad3856dbdcffec14/pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c", size = 1703675, upload-time = "2025-05-17T17:21:13.146Z" }, -] - [[package]] name = "pydantic" version = "2.12.5" @@ -3272,33 +2629,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, ] -[[package]] -name = "pydemumble" -version = "0.0.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c6/a9/1a78b7b6ed256c7bfec610e20658a3c8ed726a4c1a4652f2a17059c3563c/pydemumble-0.0.1.tar.gz", hash = "sha256:715f34fa999add1ef9337d1aae32d3b4b2a2a8dea0ddf363fcb1dac10593ca43", size = 1298294, upload-time = "2025-02-15T05:19:10.803Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/69/62/d479f63a145e50fb42dfa1090d0f5a58b3af94d02990e84c09c7b8cc2e7c/pydemumble-0.0.1-cp312-abi3-macosx_10_14_x86_64.whl", hash = "sha256:2dc4848ad97afb897877ee537820b38f427e30fe0a3b8dac25fe5c7b44d60cf0", size = 150273, upload-time = "2025-02-15T05:18:52.897Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fa/3e/4acec3739eef2b05779d250cbb9420feb61d8a3a1945651826620c03be12/pydemumble-0.0.1-cp312-abi3-macosx_11_0_arm64.whl", hash = "sha256:f5c770fde6b5929b5b386fe99960e6c237d62860c0eea0267f179034716c3ca0", size = 143501, upload-time = "2025-02-15T05:18:53.946Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/97/23/2ca50587a14cff273f3c4bb42dd42c87efd921ed09974bd7c43fcd8e02bc/pydemumble-0.0.1-cp312-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94d7348778ed7a483cec06554b6367decb9c16da1a1b251e56faa21f2f34b4cc", size = 175584, upload-time = "2025-02-15T05:18:54.952Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6e/88/504c1bcd70bf970e387eaf789fd89637d01b333ba2c1b0975977fc7f0d64/pydemumble-0.0.1-cp312-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c83eeab994eab54640d347564799dd651857d06b1cdaf60fb81d2215c55a20", size = 190161, upload-time = "2025-02-15T05:18:56.077Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/5c/c9f981ea7b10817ec3b71a9d1790deb2c90031ec800e974cf432701efe49/pydemumble-0.0.1-cp312-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24ff7fe892dd90e9a73e8a8e8225247820613b2ac9e6b6d5cf916f3a1d51cc1d", size = 181234, upload-time = "2025-02-15T05:18:57.211Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/ee/e249299975181286cfdbffcd070856e950451d1859aeb564a5ca28ac18a5/pydemumble-0.0.1-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:488c2ee1c743d0989bdc67c802c941900db77cc8150fb100f38eca2e3a462d5b", size = 588544, upload-time = "2025-02-15T05:18:58.351Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/81/bb/06c2a4861a3f7945d2918f6142bf688eaf4fa6424ddd46e792f00399fee6/pydemumble-0.0.1-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:20a3e1e672ab210139640658f4ef3b4e7abe48f4562fc14947b538b95ab3e1c2", size = 653900, upload-time = "2025-02-15T05:18:59.443Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/23/7edfc6c6f4fba5fa3f371cdc3ecd774ab98bc4efb8a688a89b802a553f13/pydemumble-0.0.1-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:76f8bd943d8eec3b65beb09e97fd3a5994a519700243e5ecaf6a6a3ebe3e3809", size = 607638, upload-time = "2025-02-15T05:19:00.684Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/a2/b4f22086fb6d479cd01410cd0a57c39748736315a7a6a753f2d6ab9e6b22/pydemumble-0.0.1-cp312-abi3-win32.whl", hash = "sha256:63b965b89401ededcf8fc879196586a6427f4d4ba2be32953170b1965f34b51b", size = 143771, upload-time = "2025-02-15T05:19:01.838Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6f/7c/d7dc551b63c79795cad5a3c35b0799437e0d34de8d93a3ffb3539bbc8930/pydemumble-0.0.1-cp312-abi3-win_amd64.whl", hash = "sha256:6dd17d309a9b7bd2eaf697e9b07d35d5c88ce30026d12cf6e58d8a9900a737c4", size = 171611, upload-time = "2025-02-15T05:19:02.881Z" }, -] - -[[package]] -name = "pydivert" -version = "2.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cf/71/2da9bcf742df3ab23f75f10fedca074951dd13a84bda8dea3077f68ae9a6/pydivert-2.1.0.tar.gz", hash = "sha256:f0e150f4ff591b78e35f514e319561dadff7f24a82186a171dd4d465483de5b4", size = 91057, upload-time = "2017-10-20T21:36:58.165Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ca/8f/86d7931c62013a5a7ebf4e1642a87d4a6050c0f570e714f61b0df1984c62/pydivert-2.1.0-py2.py3-none-any.whl", hash = "sha256:382db488e3c37c03ec9ec94e061a0b24334d78dbaeebb7d4e4d32ce4355d9da1", size = 104718, upload-time = "2017-10-20T21:36:56.726Z" }, -] - [[package]] name = "pydocket" version = "0.16.3" @@ -3322,41 +2652,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/94/93b7f5981aa04f922e0d9ce7326a4587866ec7e39f7c180ffcf408e66ee8/pydocket-0.16.3-py3-none-any.whl", hash = "sha256:e2b50925356e7cd535286255195458ac7bba15f25293356651b36d223db5dd7c", size = 67087, upload-time = "2025-12-23T23:37:31.829Z" }, ] -[[package]] -name = "pydot" -version = "4.0.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "pyparsing" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/35/b17cb89ff865484c6a20ef46bf9d95a5f07328292578de0b295f4a6beec2/pydot-4.0.1.tar.gz", hash = "sha256:c2148f681c4a33e08bf0e26a9e5f8e4099a82e0e2a068098f32ce86577364ad5", size = 162594, upload-time = "2025-06-17T20:09:56.454Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/32/a7125fb28c4261a627f999d5fb4afff25b523800faed2c30979949d6facd/pydot-4.0.1-py3-none-any.whl", hash = "sha256:869c0efadd2708c0be1f916eb669f3d664ca684bc57ffb7ecc08e70d5e93fee6", size = 37087, upload-time = "2025-06-17T20:09:55.25Z" 
}, -] - -[[package]] -name = "pyelftools" -version = "0.32" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/ab/33968940b2deb3d92f5b146bc6d4009a5f95d1d06c148ea2f9ee965071af/pyelftools-0.32.tar.gz", hash = "sha256:6de90ee7b8263e740c8715a925382d4099b354f29ac48ea40d840cf7aa14ace5", size = 15047199, upload-time = "2025-02-19T14:20:05.549Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/43/700932c4f0638c3421177144a2e86448c0d75dbaee2c7936bda3f9fd0878/pyelftools-0.32-py3-none-any.whl", hash = "sha256:013df952a006db5e138b1edf6d8a68ecc50630adbd0d83a2d41e7f846163d738", size = 188525, upload-time = "2025-02-19T14:19:59.919Z" }, -] - -[[package]] -name = "pyformlang" -version = "1.0.11" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "networkx" }, - { name = "numpy" }, - { name = "pydot" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/41/772000760174a909294ad496854b81a882e68458dda22f53b47e1e777f0d/pyformlang-1.0.11.tar.gz", hash = "sha256:e292ec8b9cfa64c26b592faf9b7cfc72c4f4b0eb0d533f045a46061e75c5ce99", size = 97188, upload-time = "2025-03-18T09:59:30.581Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/fe/e26ea3b233672c7a89642b0615e2c44905578a4889eb00b06a358d8ab8c5/pyformlang-1.0.11-py3-none-any.whl", hash = "sha256:a8d76481e4d76e4df1c00cd5c5fce61969635900ef521ad182a07a61ca1c070e", size = 128508, upload-time = "2025-03-18T09:59:28.897Z" }, -] - [[package]] name = "pygments" version = "2.19.2" @@ -3380,65 +2675,6 @@ crypto = [ { name = "cryptography" }, ] -[[package]] -name = "pylsqpack" -version = "0.3.23" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/f3/2681d5d38cd789a62352e105619d353d3c245f463a376c1b9a735e3c47b3/pylsqpack-0.3.23.tar.gz", hash = "sha256:f55b126940d8b3157331f123d4428d703a698a6db65a6a7891f7ec1b90c86c56", size = 676891, upload-time = "2025-10-10T17:12:58.747Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/5d/44c5f05d4f72ac427210326a283f74541ad694d517a1c136631fdbcd8e4b/pylsqpack-0.3.23-cp310-abi3-macosx_10_9_x86_64.whl", hash = "sha256:978497811bb58cf7ae11c0e1d4cf9bdf6bccef77556d039ae1836b458cb235fc", size = 162519, upload-time = "2025-10-10T17:12:44.892Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/38/9a/3472903fd88dfa87ac683e7113e0ac9df47b70924db9410b275c6e16b25f/pylsqpack-0.3.23-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:8a9e25c5a98a0959c6511aaf7d1a6ac0d6146be349a8c3c09fec2e5250cb2901", size = 167819, upload-time = "2025-10-10T17:12:46.54Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a7/cf/43e7b04f6397be691a255589fbed25fb4b8d7b707ad8c118408553ff2a5b/pylsqpack-0.3.23-cp310-abi3-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f7d78352e764732ac1a9ab109aa84e003996a7d64de7098cb20bdc007cf7613", size = 246484, upload-time = "2025-10-10T17:12:47.588Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/38/e44ba48404b61b4dd1e9902bef7e01afac5c31e57c5dceec2f0f4e522fcb/pylsqpack-0.3.23-cp310-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8ba86c384dcf8952cef190f8cc4d61cb2a8e4eeaf25093c6aa38b9b696ac82dc", size = 248586, upload-time = 
"2025-10-10T17:12:48.621Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1f/46/1f0eb601215bc7596e3003dde6a4c9ad457a4ab35405cdcc56c0727cdf49/pylsqpack-0.3.23-cp310-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:829a2466b80af9766cf0ad795b866796a4000cec441a0eb222357efd01ec6d42", size = 249520, upload-time = "2025-10-10T17:12:49.639Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/20/a91d4f90480baaa14aa940512bdfae3774b2524bbf71d3f16391b244b31e/pylsqpack-0.3.23-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b516d56078a16592596ea450ea20e9a54650af759754e2e807b7046be13c83ee", size = 246141, upload-time = "2025-10-10T17:12:51.165Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/bb/02c018e0fc174122d5bd0cfcbe858d40a4516d9245fca4a7a2dd5201deea/pylsqpack-0.3.23-cp310-abi3-musllinux_1_2_i686.whl", hash = "sha256:db03232c85855cb03226447e41539f8631d7d4e5483d48206e30d470a9cb07a1", size = 246064, upload-time = "2025-10-10T17:12:52.243Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/02/ca/082d31c1180ab856118634a3a26c7739cf38aee656702c1b39dc1acc26a0/pylsqpack-0.3.23-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d91d87672beb0beff6a866dbf35e8b45791d8dffcd5cfd9d8cc397001101fd5", size = 247847, upload-time = "2025-10-10T17:12:53.364Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/33/58e7ced97a04bfb1807143fc70dc7ff3b8abef4e39c5144235f0985e43cc/pylsqpack-0.3.23-cp310-abi3-win32.whl", hash = "sha256:4e5b0b5ec92be6e5e6eb1c52d45271c5c7f8f2a2cd8c672ab240ac2cd893cd26", size = 153227, upload-time = "2025-10-10T17:12:54.459Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/da/691477b89927643ea30f36511825e9551d7f36c887ce9bb9903fac31390d/pylsqpack-0.3.23-cp310-abi3-win_amd64.whl", hash = "sha256:498b374b16b51532997998c4cf4021161d2a611f5ea6b02ad95ca99815c54abf", size = 155779, upload-time = "2025-10-10T17:12:55.406Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/17/a8bc10443fd4261911dbb41331d39ce2ad28ba82a170eddecf23904b321c/pylsqpack-0.3.23-cp310-abi3-win_arm64.whl", hash = "sha256:2f9a2ef59588d32cd02847c6b9d7140440f67a0751da99f96a2ff4edadc85eae", size = 153188, upload-time = "2025-10-10T17:12:56.782Z" }, -] - -[[package]] -name = "pyopenssl" -version = "25.0.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9f/26/e25b4a374b4639e0c235527bbe31c0524f26eda701d79456a7e1877f4cc5/pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16", size = 179573, upload-time = "2025-01-12T17:22:48.897Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ca/d7/eb76863d2060dcbe7c7e6cccfd95ac02ea0b9acc37745a0d99ff6457aefb/pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90", size = 56453, upload-time = "2025-01-12T17:22:43.44Z" }, -] - -[[package]] -name = "pyparsing" -version = "3.2.1" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8b/1a/3544f4f299a47911c2ab3710f534e52fea62a633c96806995da5d25be4b2/pyparsing-3.2.1.tar.gz", hash = 
"sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a", size = 1067694, upload-time = "2024-12-31T20:59:46.157Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1c/a7/c8a2d361bf89c0d9577c934ebb7421b25dc84bf3a8e3ac0a40aed9acc547/pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1", size = 107716, upload-time = "2024-12-31T20:59:42.738Z" }, -] - -[[package]] -name = "pypcode" -version = "3.3.3" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/4c/7a57f97f569c24607a0490d9bf5e69ad2a29915ece5cb7f0891d4d6847d5/pypcode-3.3.3.tar.gz", hash = "sha256:378b8d3b1552c3251243b421114dd67ccb49660fe762c60fb19dede1279cf30c", size = 2159423, upload-time = "2025-10-13T21:34:36.64Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/39/92/3272987c85f92f6cd6b77b4b2b663c14bb0006aee04e8d55329d9dafdc09/pypcode-3.3.3-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:31be0e2524701546106f98a16faad85f85b89fcd98289ff41d9129f4280fe29d", size = 13805188, upload-time = "2025-10-13T21:33:56.879Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/cb/3c3b182808ad1667cdd9d2dd2994f2c662b8cfa68d206bb5218684377414/pypcode-3.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a60bbe863456f42a97b5b33d10ccb97ae2f725542d1fd871b9293000619532af", size = 13756473, upload-time = "2025-10-13T21:33:59.122Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/f6/235a8aeabed2bc8e74f4dd5d052c0548f7e2aabad47ad8916873e717c358/pypcode-3.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5241cd815a89229d6ccc26b3d525e38eb1b17df7f60c35f085428351ad199f8d", size = 13844193, upload-time = "2025-10-13T21:34:01.14Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0c/16/2fd2e676d0de49c9c8afcdcde747019e96cc9a9c50810a93cee69ae1c79a/pypcode-3.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2af24d1741aff3e57821c58c90ffafe2d66d540284f126b5e46cec4826060f0", size = 13878012, upload-time = "2025-10-13T21:34:03.221Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/d9/b5deaab861c384f3b0702aab2c1a952921cf9ab4cbf7be5dab806bb476cd/pypcode-3.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:1346b59345c361f2fe782db8bef7c7478c57261ff760567096507285f7ff22d0", size = 13628994, upload-time = "2025-10-13T21:34:05.571Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5e/45/4c5416a4703b3b56f43cb71a0fdf8216620cd486110f9b1c94a6b419c63a/pypcode-3.3.3-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:e7ce476461ce9224a3dd77ad05810322c1f161155e2c773207368a6ff4103708", size = 13805118, upload-time = "2025-10-13T21:34:07.69Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dc/fb/1b6c995783c26b9fb70ad8feb05dcb82aa3f55252c85bb2499d5b3cf25a7/pypcode-3.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:245e36beb6487c9f4aed1fe20b6c40454d93f52d2f9e56ece79c66c569a50020", size = 13756412, upload-time = "2025-10-13T21:34:11.592Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/00/04/69a0557253fc9dcefdb88cb28e254c436dc66e5be0867e8cbf755daa6558/pypcode-3.3.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d977dcb6519fefe6c198b49a1c3b51ef1aac25c6829c51103104b13bd846f7d7", size = 13844125, 
upload-time = "2025-10-13T21:34:14.053Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/16/c4f618d51529418a7ceb8fd6f8b4a323cc9889afa5211d2b7177d4560e60/pypcode-3.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cdfd0c89c24c13fe0f04f570adc3369c39a3c3c5696b86178f74f436a3f7500", size = 13877979, upload-time = "2025-10-13T21:34:16.178Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d0/68/3e3b7f076a4c7bf0b114a0393e7ad8d90ddfb4882f790d2009481e4be4e0/pypcode-3.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:ddcb34b31a904241dcafc22c674511b11455f56a05c11ab1ee3a5973ff0cfd77", size = 13628920, upload-time = "2025-10-13T21:34:18.562Z" }, -] - [[package]] name = "pypdf" version = "6.6.0" @@ -3455,12 +2691,16 @@ source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/30/23/2f0a3efc4d6a32f3b63cdff36cd398d9701d26cda58e3ab97ac79fb5e60d/pyperclip-1.9.0.tar.gz", hash = "sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310", size = 20961, upload-time = "2024-06-18T20:38:48.401Z" } [[package]] -name = "pysocks" -version = "1.7.1" +name = "pyright" +version = "1.1.409" source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429, upload-time = "2019-09-20T02:07:35.714Z" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/51/4e/3aa27f74211522dba7e9cbc3e74de779c6d4b654c54e50a4840623be8014/pyright-1.1.409.tar.gz", hash = "sha256:986ee05beca9e077c165758ad123667c679e050059a2546aa02473930394bc93", size = 4430434, upload-time = "2026-04-23T11:02:03.799Z" } wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725, upload-time = "2019-09-20T02:06:22.938Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/16/6b/330d8ebae582b30c2959a1ef4c3bc344ebde48c2ff0c3f113c4710735e11/pyright-1.1.409-py3-none-any.whl", hash = "sha256:aa3ea228cab90c845c7a60d28db7a844c04315356392aa09fafcee98c8c22fb3", size = 6438161, upload-time = "2026-04-23T11:02:01.309Z" }, ] [[package]] @@ -3564,32 +2804,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, ] -[[package]] -name = "pyvex" -version = "9.2.191" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "bitstring" }, - { name = "cffi", marker = "implementation_name == 'cpython'" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/01/de3b1f358184bda64dc5025ca28f5e4be26fa1a88c9fe404ced9e25c8b0d/pyvex-9.2.191.tar.gz", hash = "sha256:b98ddc16db5693f46155f13ba770f272615450b8e3e58ca24b45a5fb5ac4befd", size = 3649102, upload-time = 
"2025-12-30T17:44:30.636Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/ac/ae661cb67c19c462017bcdb355bc0356a2f1cd655586e257f6cdc53963c2/pyvex-9.2.191-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:efc09e4ce15586db59a8604c2c5285094ef8088bc235a731042a66e130503795", size = 1864470, upload-time = "2025-12-30T17:43:55.507Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/31/bac6717c87aa58a1b239b37dad27d94ce14df521c3d1497822c96126cb9e/pyvex-9.2.191-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a204ec025f727dff6687740eb542c7391b7d632bd79e98301a4d91a4223ee17a", size = 1587483, upload-time = "2025-12-30T17:43:56.85Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ce/66/f7d85588918edc13d3c5f07ccade62a2d140c53717d76793b01fbca3e9e9/pyvex-9.2.191-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffd74f6944ed62409afd0ebafafbbda06e352f65175a8b803b49701bb7937815", size = 1783318, upload-time = "2025-12-30T17:43:58.598Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6d/93/b11bc5f61783a6693c4b0df97aee403740b426f17fde42cf264a2d2354a7/pyvex-9.2.191-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3acc386970e655913f469e0883d81efaa2b60d4e042e48102c645d3429918ba", size = 1915136, upload-time = "2025-12-30T17:43:59.95Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/69/01/ba1651f7d0d9cc9f66bf99507d4b47af49563c1f73bc6de207e7fa71d58c/pyvex-9.2.191-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:46966273c3e808e6c06d78da62f1549c433a6b7c1968bf6a4a85d87052236468", size = 1800213, upload-time = "2025-12-30T17:44:01.367Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/93/a2/73252ea93502c304b2b4335ff9a9c8d98ebd3b8701610f381c35a698aee9/pyvex-9.2.191-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8b02ecd0aae6f93028d877e87fdb82654b549e2942e227db97eaf1f6e1fb00d0", size = 1946647, upload-time = "2025-12-30T17:44:03.006Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/c9/a3e0e652a5077fcbcb781362f4f0dc375951edd4483b839b5b6dca024d58/pyvex-9.2.191-cp312-cp312-win_amd64.whl", hash = "sha256:52f3e47be14bec4a15be38d3af8e62898ade7cd392b7f16b78b8018af9b01c41", size = 1363400, upload-time = "2025-12-30T17:44:04.471Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4d/c9/a1ba716ef9f2133fcfe77bd190c13abb8254ab757bdff7198ddc279c12e7/pyvex-9.2.191-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0dec0aa852098dda20dc5888018b619bb55a9b20013ae3e10e8924e4f74503b", size = 1864470, upload-time = "2025-12-30T17:44:06.378Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/98/f2fc6834da2c9a2e3e1ab45f7e2ab53c4bb799822aab27c25f2188a0bfbd/pyvex-9.2.191-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b7f1381d2d74c5878364371600d3be900cd852f697a6d4839b5df5de511bc937", size = 1587481, upload-time = "2025-12-30T17:44:07.742Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/1d/4f95450cb4ca625103e43db874f5b633816ef811fbe60a0900761dc9a6b2/pyvex-9.2.191-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8ef8fc1ec814738d1371ceee5e05eec2e318a2135282b4468ac591b2c6d51412", size = 1783318, upload-time = "2025-12-30T17:44:09.485Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/d3/273e64b7db5ed3893502c9f432aaaaeda617930363ce28b7d134017e16a1/pyvex-9.2.191-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f0fbea242f96f0520b4d9b5c2231fd9ccbd90cc69be8ff0efba63c04c6c1c15", size = 1915137, upload-time = "2025-12-30T17:44:10.886Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d2/77/14110503262d2fcb1f29ce1227782d39fb8308f4ddabbe608444291cec1a/pyvex-9.2.191-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:441b7e329a6e298ac23058cf68c7d87ae698309510abc7d81a1ba98e2d81d9ce", size = 1800213, upload-time = "2025-12-30T17:44:12.661Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a7/af/174010241e939f19d606e4442566effe4655b19622e95fa9fe14b6438708/pyvex-9.2.191-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:32aa150c032f43e3ecdff97ed86e8fe92eaf12f59251d76c85b824c3c85c0a55", size = 1946648, upload-time = "2025-12-30T17:44:14.188Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/3a/6060b8f682da42e4b533876c7679c9d1bf3b40829d1b881c222a589e4da1/pyvex-9.2.191-cp313-cp313-win_amd64.whl", hash = "sha256:25958bdfddda731cefa2387bf4068e34493ca345d29da57ee96d9e4bc0a1954d", size = 1363401, upload-time = "2025-12-30T17:44:15.79Z" }, -] - [[package]] name = "pywin32" version = "311" @@ -3612,25 +2826,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/3d/8161f7711c017e01ac9f008dfddd9410dff3674334c233bde66e7ba65bbf/pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8", size = 30756, upload-time = "2024-08-14T10:15:33.187Z" }, ] -[[package]] -name = "pyxbe" -version = "1.0.3" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/cb/a090b3a16a6a3d3c0c91f6d9e187df3dae33ce1f5b19b91bbf218dea0a6d/pyxbe-1.0.3.tar.gz", hash = "sha256:ef38c9b07bffd9daecdd32640a3e6c99f62a621a8b8a4d54a0c2ccf9fb1b7cdb" } - -[[package]] -name = "pyxdia" -version = "0.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/17/d8/6e691bfb19edf50a3286349d5fd4200a569dfc2c900a083e3548a6a1c726/pyxdia-0.1.0.tar.gz", hash = "sha256:af95d1ce70407e7a0f72d02ba77d366c0dfb0ed58fb336f8725ac8f3493b7e68", size = 6043, upload-time = "2025-06-13T20:03:44.192Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/21/d8/fceba98b6faa81534faae2b87b52d669d22f61ea7c64968babe01fa839a8/pyxdia-0.1.0-py3-none-macosx_13_0_x86_64.whl", hash = "sha256:c5c2ca7041294a9e1ff81622b0305569b04c3d511992c84640052899fb039b00", size = 1641463, upload-time = "2025-06-13T20:03:35.701Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0e/2f/f563746306c448248b98303f2a991812d1180766cb220f4e601f0caec771/pyxdia-0.1.0-py3-none-macosx_14_0_arm64.whl", hash = "sha256:b40738dc999e560fa3283aa2aeb819c34cd9084c7b0949bc2e5e136c631cbbc0", size = 1625815, upload-time = "2025-06-13T20:03:37.489Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/d4/46ee9f0c1b0a29160fd7490cb2a5abf32bb64bb5d704404bd937c9498e43/pyxdia-0.1.0-py3-none-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3809b60a4b95d2bf5a2babd8b50f67b362b10a371abc92a8cf3a86fc648857d8", size = 1383830, upload-time = "2025-06-13T20:03:39.283Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/84/81/638749bc9bba9b4be8a3c8dd6abba85b1cb527bc6507f591e8d7fd1cfd37/pyxdia-0.1.0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:28e385eda08eba48883c2c6fc29ea6da8d8536924133468b4b8281a8b24a176a", size = 2706942, upload-time = "2025-06-13T20:03:40.903Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/ea/34d31e0e4484d2acdd25a24d56a26be7855f9119244fd9ea16d00095b01e/pyxdia-0.1.0-py3-none-win_amd64.whl", hash = "sha256:5bd17cec300f53d0bb7926f64095e3806674c6a82f8c2a5c6be24aa6fc1384a1", size = 958051, upload-time = "2025-06-13T20:03:42.789Z" }, -] - [[package]] name = "pyyaml" version = "6.0.3" @@ -3901,46 +3096,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, ] -[[package]] -name = "ruamel-yaml" -version = "0.18.10" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "ruamel-yaml-clib", marker = "python_full_version < '3.13' and platform_python_implementation == 'CPython'" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/46/f44d8be06b85bc7c4d8c95d658be2b68f27711f279bf9dd0612a5e4794f5/ruamel.yaml-0.18.10.tar.gz", hash = "sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58", size = 143447, upload-time = "2025-01-06T14:08:51.334Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/36/dfc1ebc0081e6d39924a2cc53654497f967a084a436bb64402dfce4254d9/ruamel.yaml-0.18.10-py3-none-any.whl", hash = "sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1", size = 117729, upload-time = "2025-01-06T14:08:47.471Z" }, -] - -[[package]] -name = "ruamel-yaml-clib" -version = "0.2.15" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/97/60fda20e2fb54b83a61ae14648b0817c8f5d84a3821e40bfbdae1437026a/ruamel_yaml_clib-0.2.15.tar.gz", hash = "sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600", size = 225794, upload-time = "2025-11-16T16:12:59.761Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/4b/5fde11a0722d676e469d3d6f78c6a17591b9c7e0072ca359801c4bd17eee/ruamel_yaml_clib-0.2.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff", size = 149088, upload-time = "2025-11-16T16:13:22.836Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/85/82/4d08ac65ecf0ef3b046421985e66301a242804eb9a62c93ca3437dc94ee0/ruamel_yaml_clib-0.2.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2", size = 134553, upload-time = "2025-11-16T16:13:24.151Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/cb/22366d68b280e281a932403b76da7a988108287adff2bfa5ce881200107a/ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1", size = 737468, upload-time = "2025-11-16T20:22:47.335Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/71/73/81230babf8c9e33770d43ed9056f603f6f5f9665aea4177a2c30ae48e3f3/ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60", size = 753349, upload-time = "2025-11-16T16:13:26.269Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/62/150c841f24cda9e30f588ef396ed83f64cfdc13b92d2f925bb96df337ba9/ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9", size = 788211, upload-time = "2025-11-16T16:13:27.441Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/30/93/e79bd9cbecc3267499d9ead919bd61f7ddf55d793fb5ef2b1d7d92444f35/ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642", size = 743203, upload-time = "2025-11-16T16:13:28.671Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8d/06/1eb640065c3a27ce92d76157f8efddb184bd484ed2639b712396a20d6dce/ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690", size = 747292, upload-time = "2025-11-16T20:22:48.584Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a5/21/ee353e882350beab65fcc47a91b6bdc512cace4358ee327af2962892ff16/ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a", size = 771624, upload-time = "2025-11-16T16:13:29.853Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/34/cc1b94057aa867c963ecf9ea92ac59198ec2ee3a8d22a126af0b4d4be712/ruamel_yaml_clib-0.2.15-cp312-cp312-win32.whl", hash = "sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144", size = 100342, upload-time = "2025-11-16T16:13:31.067Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/e5/8925a4208f131b218f9a7e459c0d6fcac8324ae35da269cb437894576366/ruamel_yaml_clib-0.2.15-cp312-cp312-win_amd64.whl", hash = "sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc", size = 119013, upload-time = "2025-11-16T16:13:32.164Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/17/5e/2f970ce4c573dc30c2f95825f2691c96d55560268ddc67603dc6ea2dd08e/ruamel_yaml_clib-0.2.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb", size = 147450, upload-time = "2025-11-16T16:13:33.542Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/03/a1baa5b94f71383913f21b96172fb3a2eb5576a4637729adbf7cd9f797f8/ruamel_yaml_clib-0.2.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471", size = 133139, upload-time = "2025-11-16T16:13:34.587Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dc/19/40d676802390f85784235a05788fd28940923382e3f8b943d25febbb98b7/ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25", size = 731474, upload-time = "2025-11-16T20:22:49.934Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ce/bb/6ef5abfa43b48dd55c30d53e997f8f978722f02add61efba31380d73e42e/ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a", size = 748047, upload-time = "2025-11-16T16:13:35.633Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/5d/e4f84c9c448613e12bd62e90b23aa127ea4c46b697f3d760acc32cb94f25/ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf", size = 782129, upload-time = "2025-11-16T16:13:36.781Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/4b/e98086e88f76c00c88a6bcf15eae27a1454f661a9eb72b111e6bbb69024d/ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d", size = 736848, upload-time = "2025-11-16T16:13:37.952Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0c/5c/5964fcd1fd9acc53b7a3a5d9a05ea4f95ead9495d980003a557deb9769c7/ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf", size = 741630, upload-time = "2025-11-16T20:22:51.718Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/1e/99660f5a30fceb58494598e7d15df883a07292346ef5696f0c0ae5dee8c6/ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51", size = 766619, upload-time = "2025-11-16T16:13:39.178Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/36/2f/fa0344a9327b58b54970e56a27b32416ffbcfe4dcc0700605516708579b2/ruamel_yaml_clib-0.2.15-cp313-cp313-win32.whl", hash = "sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec", size = 100171, upload-time = "2025-11-16T16:13:40.456Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/c4/c124fbcef0684fcf3c9b72374c2a8c35c94464d8694c50f37eef27f5a145/ruamel_yaml_clib-0.2.15-cp313-cp313-win_amd64.whl", hash = "sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6", size = 118845, upload-time = "2025-11-16T16:13:41.481Z" }, -] - [[package]] name = "ruff" version = "0.14.10" @@ -3980,23 +3135,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/46/f5af3402b579fd5e11573ce652019a67074317e18c1935cc0b4ba9b35552/secretstorage-3.5.0-py3-none-any.whl", hash = "sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137", size = 15554, upload-time = "2025-11-23T19:02:51.545Z" }, ] -[[package]] -name = "selenium" -version = "4.39.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "certifi" }, - { name = "trio" }, - { name = "trio-websocket" }, - { name = "typing-extensions" }, - { name = "urllib3", extra = ["socks"] }, - { name = "websocket-client" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/19/27c1bf9eb1f7025632d35a956b50746efb4b10aa87f961b263fa7081f4c5/selenium-4.39.0.tar.gz", hash = "sha256:12f3325f02d43b6c24030fc9602b34a3c6865abbb1db9406641d13d108aa1889", size = 928575, upload-time = "2025-12-06T23:12:34.896Z" } -wheels = [ - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/d0/55a6b7c6f35aad4c8a54be0eb7a52c1ff29a59542fc3e655f0ecbb14456d/selenium-4.39.0-py3-none-any.whl", hash = "sha256:c85f65d5610642ca0f47dae9d5cc117cd9e831f74038bc09fe1af126288200f9", size = 9655249, upload-time = "2025-12-06T23:12:33.085Z" }, -] - [[package]] name = "semver" version = "3.0.4" @@ -4019,21 +3157,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/88/43/1c586f9f413765201234541857cb82fda076f4b0f7bad4a0ec248da39cf3/sentry_sdk-2.49.0-py2.py3-none-any.whl", hash = "sha256:6ea78499133874445a20fe9c826c9e960070abeb7ae0cdf930314ab16bb97aa0", size = 415693, upload-time = "2026-01-08T09:56:21.872Z" }, ] -[[package]] -name = "service-identity" -version = "24.2.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "attrs" }, - { name = "cryptography" }, - { name = "pyasn1" }, - { name = "pyasn1-modules" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/a5/dfc752b979067947261dbbf2543470c58efe735c3c1301dd870ef27830ee/service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09", size = 39245, upload-time = "2024-10-26T07:21:57.736Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/08/2c/ca6dd598b384bc1ce581e24aaae0f2bed4ccac57749d5c3befbb5e742081/service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85", size = 11364, upload-time = "2024-10-26T07:21:56.302Z" }, -] - [[package]] name = "shellingham" version = "1.5.4" @@ -4052,15 +3175,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] -[[package]] -name = "smmap" -version = "5.0.2" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" }, -] - [[package]] name = "sniffio" version = "1.3.1" @@ -4141,18 +3255,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, ] -[[package]] -name = "sympy" -version = "1.14.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "mpmath" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = 
"sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, -] - [[package]] name = "tavily-python" version = "0.7.17" @@ -4209,24 +3311,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/93/e0/6cc82a562bc6365785a3ff0af27a2a092d57c47d7a81d9e2295d8c36f011/tiktoken-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dc2dd125a62cb2b3d858484d6c614d136b5b848976794edfb63688d539b8b93f", size = 878777, upload-time = "2025-10-06T20:22:18.036Z" }, ] -[[package]] -name = "tornado" -version = "6.4.2" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/59/45/a0daf161f7d6f36c3ea5fc0c2de619746cc3dd4c76402e9db545bd920f63/tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b", size = 501135, upload-time = "2024-11-22T03:06:38.036Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/26/7e/71f604d8cea1b58f82ba3590290b66da1e72d840aeb37e0d5f7291bd30db/tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1", size = 436299, upload-time = "2024-11-22T03:06:20.162Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/96/44/87543a3b99016d0bf54fdaab30d24bf0af2e848f1d13d34a3a5380aabe16/tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803", size = 434253, upload-time = "2024-11-22T03:06:22.39Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/fb/fdf679b4ce51bcb7210801ef4f11fdac96e9885daa402861751353beea6e/tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec", size = 437602, upload-time = "2024-11-22T03:06:24.214Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/3b/e31aeffffc22b475a64dbeb273026a21b5b566f74dee48742817626c47dc/tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946", size = 436972, upload-time = "2024-11-22T03:06:25.559Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/55/b78a464de78051a30599ceb6983b01d8f732e6f69bf37b4ed07f642ac0fc/tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf", size = 437173, upload-time = "2024-11-22T03:06:27.584Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/79/5e/be4fb0d1684eb822c9a62fb18a3e44a06188f78aa466b2ad991d2ee31104/tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634", size = 437892, upload-time = "2024-11-22T03:06:28.933Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f5/33/4f91fdd94ea36e1d796147003b490fe60a0215ac5737b6f9c65e160d4fe0/tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73", size = 437334, upload-time = "2024-11-22T03:06:30.428Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2b/ae/c1b22d4524b0e10da2f29a176fb2890386f7bd1f63aacf186444873a88a0/tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c", size = 437261, upload-time = "2024-11-22T03:06:32.458Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b5/25/36dbd49ab6d179bcfc4c6c093a51795a4f3bed380543a8242ac3517a1751/tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482", size = 438463, upload-time = "2024-11-22T03:06:34.71Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/cc/58b1adeb1bb46228442081e746fcdbc4540905c87e8add7c277540934edb/tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38", size = 438907, upload-time = "2024-11-22T03:06:36.71Z" }, -] - [[package]] name = "tqdm" version = "4.67.1" @@ -4239,37 +3323,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, ] -[[package]] -name = "trio" -version = "0.32.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "attrs" }, - { name = "cffi", marker = "implementation_name != 'pypy' and os_name == 'nt'" }, - { name = "idna" }, - { name = "outcome" }, - { name = "sniffio" }, - { name = "sortedcontainers" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/ce/0041ddd9160aac0031bcf5ab786c7640d795c797e67c438e15cfedf815c8/trio-0.32.0.tar.gz", hash = "sha256:150f29ec923bcd51231e1d4c71c7006e65247d68759dd1c19af4ea815a25806b", size = 605323, upload-time = "2025-10-31T07:18:17.466Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/41/bf/945d527ff706233636c73880b22c7c953f3faeb9d6c7e2e85bfbfd0134a0/trio-0.32.0-py3-none-any.whl", hash = "sha256:4ab65984ef8370b79a76659ec87aa3a30c5c7c83ff250b4de88c29a8ab6123c5", size = 512030, upload-time = "2025-10-31T07:18:15.885Z" }, -] - -[[package]] -name = "trio-websocket" -version = "0.12.2" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "outcome" }, - { name = "trio" }, - { name = "wsproto" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/3c/8b4358e81f2f2cfe71b66a267f023a91db20a817b9425dd964873796980a/trio_websocket-0.12.2.tar.gz", hash = "sha256:22c72c436f3d1e264d0910a3951934798dcc5b00ae56fc4ee079d46c7cf20fae", size = 33549, upload-time = "2025-02-25T05:16:58.947Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/19/eb640a397bba49ba49ef9dbe2e7e5c04202ba045b6ce2ec36e9cadc51e04/trio_websocket-0.12.2-py3-none-any.whl", hash = "sha256:df605665f1db533f4a386c94525870851096a223adcb97f72a07e8b4beba45b6", size = 21221, upload-time = "2025-02-25T05:16:57.545Z" }, -] - [[package]] name = "typer" version = "0.21.0" @@ -4371,38 
+3424,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" }, ] -[[package]] -name = "uefi-firmware" -version = "1.11" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "future" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/26/ca/139ef1b190af2280e3a149391ed43028c1d19283ad556aa090c49aab3f36/uefi_firmware-1.11.tar.gz", hash = "sha256:30e2a9d138ac1608bdfc1783a936931eb6f4292723919f1db2c1509ec18a6041" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/20/1f56471e7f882f92b6b5be80674ddded92a39da483b648120eb61bd2e362/uefi_firmware-1.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c927563154ce2e6ff522bda07c99ff3b4feba7167bf59fb516f755565a7523e7" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/28/137c6163823bb6f02a384c0a9dc0963dc70b0a89b077e2f3a2ed2f9f1f48/uefi_firmware-1.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fe45376980d1533e8faf80af1a19e8dcd1b8170a447e1fa39934f4fa91ea0a35" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/1e/a5e52142e0d50979ae6cb148348fa4c1d66eb06932dacade4057025a7d7e/uefi_firmware-1.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1cf627f257f14ed1bb63290c658e9adb772292927ad399ef70366801dc0cb9a5" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/58/37ba11ff39707b919b590dd7a8c0cae613bdc7171631963af16ef29b1f79/uefi_firmware-1.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4253b57873fc6127a50ca92797741a74b774d1aa3c994011bd196b21033ddc1" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e9/ab/b37ade211fc5ed704f685352fe0947cbeaf9165924fe2c0048120da4ccd4/uefi_firmware-1.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e71095839db1733d77e0001f906104e3084c6bc433dcc5ff8240a65a58d2945" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e6/a9/7ead995a7f5ffe77d21e3479e95ca857b7cc9c241271999cb780a2cfc950/uefi_firmware-1.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da64a6cedea68934dab6186b13a48df55027429490f3be02bc910803d040d733" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/49/e2/5d0c396fab59b6c16e41271b9c74820c0107163be967db0474226ffa31dd/uefi_firmware-1.11-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:07a891147c10a6112c6ae4d76efbd07e422ceb1b34e6aa1236d9beb21a6d48b6" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e7/8c/32adad2dddd768ec0a44aa2174a44864e03e5eab68c59c047148b8e3b2d2/uefi_firmware-1.11-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:75b2de62978767e2263f3299f3ed2d5daaee47cfeb77496769c531e35c2c3393" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/59/98/04155a6f6eded9d184692533ec55fd711d9a2e5ae4a5ed416689f48beb11/uefi_firmware-1.11-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a45d398edbb985952ee4dc1987f40aecf9af2b151a783eaa70ee0447e7a8052" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/61/6e63948588d4bf75e94e45b57205113322972d3b87c3b1ea0062e8e26f0f/uefi_firmware-1.11-cp312-cp312-win32.whl", hash = 
"sha256:cda1eec452581e6a1fdca9b62c9e4fe9ab6284ef16334fab47b9f3426bbeb841" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/ea/8189bac9bf10dccc6690712aa5695bbd0b82bc5cd4f20defe5892912d364/uefi_firmware-1.11-cp312-cp312-win_amd64.whl", hash = "sha256:0d544e25dab9cefa23a9c2f43226f94d50f85b3b625fe47ce3df12b530e1a1fa" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/43/0e/778e3e72005711e6e886caa3f2a6e33dba4073f679df46c4204740761cc8/uefi_firmware-1.11-cp312-cp312-win_arm64.whl", hash = "sha256:25ddff80fc4d95199ad48e304f32566447acf7a593d0cd4d293629f2e3eabd45" }, -] - -[[package]] -name = "unique-log-filter" -version = "0.1.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/5e/942062197dc9679e5d4fa112c3eb782236800cf2c238b08f116cfbd45895/unique_log_filter-0.1.0.tar.gz", hash = "sha256:411ca5b30572293fc37cd93a651da2a56d4b3a4fdafe9fdfaac14eee54d6a8db", size = 996, upload-time = "2023-12-20T16:31:01.268Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/e7/c5c1ddac18fee72373b03d3b091d7a5f2ef5ff96bea4491f23cc78e8aecd/unique_log_filter-0.1.0-py3-none-any.whl", hash = "sha256:e0e7012b4d5911233e02d5ae849fd716c1b8af14dfed0e456af1d6dcdd6a1195", size = 1481, upload-time = "2023-12-20T16:31:00.006Z" }, -] - [[package]] name = "urllib3" version = "2.6.2" @@ -4412,24 +3433,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, ] -[package.optional-dependencies] -socks = [ - { name = "pysocks" }, -] - -[[package]] -name = "urwid" -version = "2.6.16" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "typing-extensions" }, - { name = "wcwidth" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/98/21/ad23c9e961b2d36d57c63686a6f86768dd945d406323fb58c84f09478530/urwid-2.6.16.tar.gz", hash = "sha256:93ad239939e44c385e64aa00027878b9e5c486d59e855ec8ab5b1e1adcdb32a2", size = 848179, upload-time = "2024-10-15T16:07:24.297Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/cb/271a4f5a1bf4208dbdc96d85b9eae744cf4e5e11ac73eda76dc98c8fd2d7/urwid-2.6.16-py3-none-any.whl", hash = "sha256:de14896c6df9eb759ed1fd93e0384a5279e51e0dde8f621e4083f7a8368c0797", size = 297196, upload-time = "2024-10-15T16:07:22.521Z" }, -] - [[package]] name = "uuid-utils" version = "0.12.0" @@ -4569,24 +3572,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/d8/0d1d2e9d3fabcf5d6840362adcf05f8cf3cd06a73358140c3a97189238ae/wcmatch-10.1-py3-none-any.whl", hash = "sha256:5848ace7dbb0476e5e55ab63c6bbd529745089343427caa5537f230cc01beb8a", size = 39854, upload-time = "2025-06-22T19:14:00.978Z" }, ] -[[package]] -name = "wcwidth" -version = "0.2.14" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } -wheels = [ - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, -] - -[[package]] -name = "websocket-client" -version = "1.9.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/41/aa4bf9664e4cda14c3b39865b12251e8e7d239f4cd0e3cc1b6c2ccde25c1/websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98", size = 70576, upload-time = "2025-10-07T21:16:36.495Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" }, -] - [[package]] name = "websockets" version = "15.0.1" @@ -4618,18 +3603,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, ] -[[package]] -name = "werkzeug" -version = "3.1.4" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -dependencies = [ - { name = "markupsafe" }, -] -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/45/ea/b0f8eeb287f8df9066e56e831c7824ac6bab645dd6c7a8f4b2d767944f9b/werkzeug-3.1.4.tar.gz", hash = "sha256:cd3cd98b1b92dc3b7b3995038826c68097dcb16f9baa63abe35f20eafeb9fe5e", size = 864687, upload-time = "2025-11-29T02:15:22.841Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/f9/9e082990c2585c744734f85bec79b5dae5df9c974ffee58fe421652c8e91/werkzeug-3.1.4-py3-none-any.whl", hash = "sha256:2ad50fb9ed09cc3af22c54698351027ace879a0b60a3b5edf5730b2f7d876905", size = 224960, upload-time = "2025-11-29T02:15:21.13Z" }, -] - [[package]] name = "win32-setctime" version = "1.2.0" @@ -4795,20 +3768,6 @@ wheels = [ { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, ] -[[package]] -name = "z3-solver" -version = "4.13.0.0" -source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" } -sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3a/fd/e3f5850fd04480a942aca9f9f7520d3fa5b57731335c221a11f55bb6d91a/z3-solver-4.13.0.0.tar.gz", hash = "sha256:52588e92aec7cb338fd6288ce93758ae01770f62ca0c80e8f4f2b2333feaf51b", size = 4848532, upload-time = "2024-03-07T19:20:07.192Z" } -wheels = [ - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4e/5b/934de9f1f31b1d0f3a8da0ff2e3092136fbffe737eca52965818464af4c3/z3_solver-4.13.0.0-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bca7d59a699a440247537c2180c519d682c9df3520a16ce288fced61a70d253d", size = 27144281, upload-time = "2024-03-07T19:19:36.033Z" }, - { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9c/20/f28dfa982bc820760117e5615d59d695d12a6fb31660f53a749be27cccca/z3_solver-4.13.0.0-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:4a4731fded91b32e1861e1c7c96e500da743bb9431246cac51f7c3ffc0f21b5d", size = 30107615, upload-time = "2024-03-07T19:19:45.679Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0e/8c/9058d3998fdc2148f3e6d3497e949d5dfc77c66b1cc1cb461554c0bba954/z3_solver-4.13.0.0-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d622022a3511c059915c56b2c231c84b5c1be1b82f457d7560dda3d916474fe", size = 55585725, upload-time = "2024-03-07T19:19:49.81Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c6/79/0255fe0efee7ea9db8987ced14c70028a0007d4d4aaaed8965310bbd7bb1/z3_solver-4.13.0.0-py2.py3-none-manylinux2014_x86_64.whl", hash = "sha256:8c42de82b6e3ff7ee61287d03c7af8a99f9f6554cdd1204c6b9bca96ff1cb7fb", size = 57305826, upload-time = "2024-03-07T19:19:54.261Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/27/ca09e1f4642b42a2972047f508bc4ecb0c5acf975c910eb0fdeaf9ec21d0/z3_solver-4.13.0.0-py2.py3-none-win32.whl", hash = "sha256:13468e1018c817b7f794898d3100f02541d15c13ab56c0785c5acdea32a066cf", size = 55429050, upload-time = "2024-03-07T19:19:58.867Z" }, - { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/25/c0/dd978c813288f6860bcfb9e4d2d1d3b311a42a2237a4766e5a0adbcaa79b/z3_solver-4.13.0.0-py2.py3-none-win_amd64.whl", hash = "sha256:3555436cfe9a5fa2d1b432fb9a5e4460e487649c22e5e68a56f7d81594d043e9", size = 58378460, upload-time = "2024-03-07T19:20:03.281Z" }, -] - [[package]] name = "zipp" version = "3.23.0" diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 20f4f7d7b..a85ddb47a 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -208,6 +208,18 @@ services: condition: service_healthy + # CLI Agent base image build service (used by CLIContainerService for executions) + # This service only builds the image; it does not run as a long-lived container. + # Per-execution containers are dynamically created by CLIContainerService. + cli-agent-image: + image: joysafeter/cli-agent:latest + build: + context: .. + dockerfile: deploy/docker/cli-agent.Dockerfile + profiles: + - build + command: ["echo", "Image built successfully"] + # OpenClaw base image build service (used by backend to create per-user containers) # This service only builds the image; it does not run as a long-lived container. # Per-user OpenClaw instances are dynamically created by the backend's diff --git a/deploy/docker/cli-agent.Dockerfile b/deploy/docker/cli-agent.Dockerfile new file mode 100644 index 000000000..a8c1e80ff --- /dev/null +++ b/deploy/docker/cli-agent.Dockerfile @@ -0,0 +1,47 @@ +# ============================================================================= +# CLI Agent Docker Image +# ============================================================================= +# Pre-installs Claude Code CLI for autonomous agent execution. +# Used by CLIContainerService to spin up per-execution containers. +# +# Build: +# docker build -f deploy/docker/cli-agent.Dockerfile -t joysafeter/cli-agent:latest . 
+# ============================================================================= + +FROM node:22-slim + +# System deps +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + curl \ + ca-certificates \ + openssh-client \ + jq \ + && rm -rf /var/lib/apt/lists/* + +# Install Claude Code CLI globally +RUN npm install -g @anthropic-ai/claude-code + +# Install Codex CLI +RUN npm install -g @openai/codex + +# Install OpenClaw (if available via npm, otherwise skip) +# RUN npm install -g openclaw@latest + +# Create non-root user +RUN groupadd -r agent && useradd -r -g agent -m -d /home/agent -s /bin/bash agent + +# Workspace directory +RUN mkdir -p /workspace && chown agent:agent /workspace + +# Default working directory +WORKDIR /workspace + +# Switch to non-root user +USER agent + +# Verify installation +RUN claude --version || echo "Claude CLI installed" + +# Default entrypoint keeps container alive for docker exec +CMD ["sleep", "infinity"] diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 561d1e679..6d048a8c8 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -1,337 +1,571 @@ # Architecture -## Overall Architecture +## 1. Overall Architecture -JoySafeter follows a layered architecture pattern with clear separation of concerns: +JoySafeter follows a layered architecture with clear separation between API surface, orchestration, execution engines, event pipeline, and real-time delivery. + +``` +Layer 1 API Routes (app/api/v1/) + WebSocket Handlers (app/websocket/) +Layer 1.5 DispatchService — API-facing facade +Layer 2 ExecutionOrchestrator — creates Run + Execution, builds ExecutionContext +Layer 2.5 EngineRegistry — singleton, maps runtime_kind to ExecutionEngine +Layer 3 Execution Engines: CLIEngine / GraphEngine / CodeEngine / CopilotEngine +Layer 3.5 ExecutionContext callbacks -> ExecutionEventBus +Layer 4a PersistenceSubscriber + StateTransitionSubscriber (Phase 1, shared DB tx) +Layer 4b WebSocketSubscriber + TaskSyncSubscriber (Phase 2, parallel fan-out) +Layer 5 ExecutionSubscriptionManager -> WebSocket clients (/ws/executions) +``` ```mermaid flowchart TB - subgraph Row1[" "] - direction LR + subgraph L1["Layer 1 — API Surface"] + REST["/v1 REST Endpoints"] + WS_EXEC["WS /ws/executions"] + WS_NOTIF["WS /ws/notifications"] + WS_CLAW["WS /ws/openclaw/*"] + end - subgraph Frontend["Frontend Layer (Next.js + React)"] - direction TB - Canvas["DeepAgents Canvas
ReactFlow"] - CodeEditor["Code Editor
CodeMirror"] - Trace["Execution Trace
SSE Stream"] - Workspace["Workspace Manager
RBAC"] - Copilot["Copilot AI
Graph Assistant"] - end + subgraph L15["Layer 1.5 — Facade"] + DISPATCH["DispatchService"] + end - subgraph API["API Layer (FastAPI)"] - direction TB - REST["REST APIs
Auth/Graphs/Chat/Skills"] - WS["WebSocket
Chat/Copilot/Runs"] - SSE["SSE Stream
Real-time Events"] - CodeAPI["Code API
Save/Run"] - end + subgraph L2["Layer 2 — Orchestration"] + ORCH["ExecutionOrchestrator"] + end - subgraph Services["Service Layer"] - direction TB - GraphSvc["GraphService"] - SkillSvc["SkillService"] - MemorySvc["MemoryService"] - McpSvc["McpClient
Service"] - ToolSvc["ToolService"] - end + subgraph L25["Layer 2.5 — Registry"] + REG["EngineRegistry"] + end - subgraph Engine["Core Engine"] - direction TB - DeepBuilder["DeepAgents
Builder"] - CodeExec["Code Executor
Sandboxed exec()"] - Middleware["Middleware System
Memory"] - SkillSys["Skill System
Progressive Disclosure"] - MemorySys["Memory System
Long/Short-term"] - end + subgraph L3["Layer 3 — Engines"] + CLI["CLIEngine
sandbox"] + GRAPH["GraphEngine
graph"] + CODE["CodeEngine
code"] + COPILOT["CopilotEngine
copilot"] end - subgraph Row2[" "] - direction LR + subgraph L35["Layer 3.5 — Event Bus"] + CTX["ExecutionContext.emit()"] + BUS["ExecutionEventBus"] + end - subgraph Runtime["Runtime Layer"] - direction TB - LangGraph["LangGraph Runtime
StateGraph"] - Checkpoint["Checkpointer
State Persistence"] + subgraph L4["Layer 4 — Subscribers"] + direction LR + subgraph Phase1["Phase 1 (shared tx, sequential)"] + PERSIST["PersistenceSubscriber"] + STATE["StateTransitionSubscriber"] end - - subgraph Data["Data Layer"] - direction TB - PG["PostgreSQL
Graphs/Skills/Memory"] - Redis["Redis
Cache/Sessions"] + subgraph Phase2["Phase 2 (parallel fan-out)"] + WS_SUB["WebSocketSubscriber"] + TASK_SUB["TaskSyncSubscriber"] end + end - subgraph MCP["MCP Tool Ecosystem"] - direction TB - MCPServers["MCP Servers
200+ Security Tools"] - Tools["Tool Registry
Unified Management"] - end + subgraph L5["Layer 5 — Delivery"] + MGR["ExecutionSubscriptionManager"] + CLIENTS["WebSocket Clients"] end - Canvas --> REST - CodeEditor --> CodeAPI - Trace --> SSE - Workspace --> REST - Copilot --> WS + REST --> DISPATCH + DISPATCH --> ORCH + ORCH --> REG + REG --> CLI & GRAPH & CODE & COPILOT + CLI & GRAPH & CODE & COPILOT --> CTX + CTX --> BUS + BUS --> PERSIST & STATE + BUS --> WS_SUB & TASK_SUB + WS_SUB --> MGR --> CLIENTS + WS_EXEC --> MGR + + style L1 fill:#f3e5f5 + style L15 fill:#e1f5ff + style L2 fill:#fff3e0 + style L25 fill:#fff3e0 + style L3 fill:#e8f5e8 + style L35 fill:#fff8e1 + style L4 fill:#fce4ec + style L5 fill:#e0f2f1 +``` + +--- + +## 2. Core Modules - REST --> Services - WS --> Services - SSE --> Services - CodeAPI --> Services +### 2.1 Contracts — Single Source of Truth for Value Domains - Services --> Engine - Engine --> Runtime - Runtime --> Data - Runtime --> MCP +Three contract files in `core/contracts/` define every canonical value as `Literal` types + plain `set[str]` constants. All code references these definitions rather than scattering magic strings. - MCPServers --> Tools +| Contract file | Defines | +|---|---| +| `agent.py` | `DefinitionKindLiteral`, `RuntimeKindLiteral`, `DEFINITION_RUNTIME_KIND` mapping, `infer_runtime_kind()` | +| `execution.py` | `RunStatusLiteral`, `ExecutionStatusLiteral`, `TriggerSourceLiteral`, terminal/active sets | +| `error.py` | `ErrorCode` (StrEnum, ~180 codes), `ErrorSource`, `UserAction`, canonical registry sets | - style Row1 fill:transparent,stroke:transparent - style Row2 fill:transparent,stroke:transparent +### 2.2 Engine Protocol + Registry + Capabilities - style Frontend fill:#e1f5ff - style API fill:#f3e5f5 - style Services fill:#fff3e0 - style Engine fill:#e8f5e8 - style Runtime fill:#fff8e1 - style Data fill:#fce4ec - style MCP fill:#e0f2f1 +**Protocol** (`core/engine/protocol.py`): +```python +@runtime_checkable +class ExecutionEngine(Protocol): + engine_kind: str + capabilities: EngineCapabilities + + async def start(self, context: ExecutionContext, *, ...) -> None: ... + async def cancel(self, execution_id: UUID) -> None: ... + async def send_message(self, execution_id: UUID, message: str) -> None: ... ``` -### Core Modules +**ExecutionContext** is injected into every engine. Engines never touch persistence or WebSocket directly — they call `context.emit()`, `context.update_status()`, and `context.complete()`. -#### 1. Graph Build System — Two Paths +**EngineCapabilities** declares what each engine supports: -The system supports two graph building modes: +| Engine | runtime_kind | cancel | msg_inject | debug_obs | artifacts | approval | +|---|---|---|---|---|---|---| +| CLIEngine | sandbox | Y | Y | N | Y | Y | +| GraphEngine | graph | Y | N | Y | Y | Y | +| CodeEngine | code | Y | N | Y | N | N | +| CopilotEngine | copilot | Y | N | N | N | N | -```mermaid -flowchart LR - Service[GraphService] -->|graph_mode = code| CodeExec[Code Executor
exec → StateGraph.compile] - Service -->|canvas mode| DeepBuilder[DeepAgents Builder
Manager-Worker topology] +**Registry** (`core/engine/registry.py`): A module-level singleton `engine_registry` maps `runtime_kind` strings to engine instances. All four engines register at import time in `core/engine/__init__.py`: - CodeExec --> LangGraph[LangGraph Runtime] - DeepBuilder --> LangGraph +```python +engine_registry.register("sandbox", CLIEngine()) +engine_registry.register("graph", GraphEngine()) +engine_registry.register("code", CodeEngine()) +engine_registry.register("copilot", CopilotEngine()) +``` - style Service fill:#e1f5ff - style CodeExec fill:#fff3e0 - style DeepBuilder fill:#e8f5e8 +**Adding a new engine** requires: +1. Implement the `ExecutionEngine` protocol +2. Register it in `core/engine/__init__.py` +3. Add the new `runtime_kind` to `core/contracts/agent.py` (`RUNTIME_KINDS`, `DEFINITION_RUNTIME_KIND`) +4. Add error codes to `core/contracts/error.py` if needed + +### 2.3 Two-Phase Event Bus + +`core/events/bus.py` — `ExecutionEventBus` + +All execution events flow through a two-phase publish pipeline: + +- **Phase 1 (PERSIST)**: Subscribers share the caller's DB session and run **sequentially**. The bus commits once after all Phase 1 subscribers complete. This guarantees that persistence and state transitions are atomic. + - `PersistenceSubscriber` — writes `ExecutionEvent` rows, assigns `seq` numbers + - `StateTransitionSubscriber` — validates and applies status changes via state machines + +- **Phase 2 (BROADCAST)**: Subscribers run **in parallel** via `asyncio.gather`. A failure in one does not affect others. + - `WebSocketSubscriber` — pushes events to `ExecutionSubscriptionManager` for real-time delivery + - `TaskSyncSubscriber` — syncs Task status based on Run terminal status + +**Envelope** (`core/events/envelope.py`): `ExecutionEventEnvelope` is the canonical shape all subscribers receive: + +```python +@dataclass +class ExecutionEventEnvelope: + execution_id: UUID + run_id: UUID + workspace_id: UUID + event_type: ExecutionEventType | str + payload: dict[str, Any] + seq: int = 0 # filled by PersistenceSubscriber + trigger_source: str | None = None + thread_id: UUID | None = None + task_id: UUID | None = None + terminal_status: str | None = None # completion-only + error: dict[str, Any] | None = None # ErrorDescriptor via AppError.to_payload() + ... ``` -**Code Mode:** -- User writes standard LangGraph Python code in the browser editor -- Backend executes code in a sandboxed environment (restricted builtins, import whitelist, exec timeout) -- Extracts `StateGraph` instance from executed code, compiles and runs it -- Zero learning curve — LangGraph docs are the docs +**Event types** (`core/events/event_types.py`): `ExecutionEventType` StrEnum — content events (`assistant_text`, `thinking`, `tool_use_start/end`, `error`, `artifact_created`, `approval_requested/resolved`), lifecycle events (`execution_started/completed/status_change`, `run_status_change`), and copilot events. -**DeepAgents Canvas Mode:** -- Visual drag-and-drop builder for multi-agent orchestration -- Three node types: Agent, Code Agent, A2A Agent -- Builds Manager-Worker star topology via `deepagents.create_deep_agent()` +### 2.4 State Machines -#### 2. DeepAgents Multi-Agent Orchestration +`core/state_machines/` centralizes all status transition rules. -DeepAgents implements a star topology with one Manager coordinating multiple Workers: +**Engine** (`engine.py`): Generic `StateMachine` class with `validate(from, to)` and `is_terminal(status)`. 
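A minimal sketch of how such a machine is driven (the constructor shape and the example statuses below are assumptions inferred from this section, not the actual `engine.py`):

```python
# Illustrative only — method names follow this document; the real module also
# defines the InvalidTransition error raised on a disallowed change.
class InvalidTransition(Exception):
    pass


class StateMachine:
    def __init__(self, transitions: dict[str, set[str]], terminal: set[str]) -> None:
        self._transitions = transitions  # from_status -> allowed to_statuses
        self._terminal = terminal

    def validate(self, from_status: str, to_status: str) -> None:
        if to_status not in self._transitions.get(from_status, set()):
            raise InvalidTransition(f"{from_status} -> {to_status}")

    def is_terminal(self, status: str) -> bool:
        return status in self._terminal


# Hypothetical table in the spirit of RUN_SM; only the terminal states below
# are taken from the table that follows, the non-terminal statuses are assumed.
RUN_SM = StateMachine(
    transitions={
        "queued": {"running", "cancelled"},
        "running": {"succeeded", "failed", "cancelled"},
    },
    terminal={"succeeded", "failed", "cancelled"},
)

RUN_SM.validate("running", "succeeded")  # ok; "succeeded" -> anything would raise
assert RUN_SM.is_terminal("succeeded")
```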
-```mermaid -flowchart TB - Manager[Manager Agent
useDeepAgents=True
DeepAgent] +**Definitions** (`definitions.py`): Transition tables for 6 entities: - Manager -->|task| Worker1[Worker 1
CompiledSubAgent] - Manager -->|task| Worker2[Worker 2
CompiledSubAgent] - Manager -->|task| Worker3[Worker 3
CompiledSubAgent] - Manager -->|task| CodeAgent[CodeAgent
CompiledSubAgent] +| State Machine | Entity | Terminal States | +|---|---|---| +| `AGENT_SM` | Agent | (none — archived can revert) | +| `VERSION_SM` | AgentVersion | (none — frozen can unfreeze) | +| `RELEASE_SM` | AgentRelease | `retired` | +| `RUN_SM` | AgentRun | `succeeded`, `failed`, `cancelled` | +| `EXECUTION_SM` | Execution | `succeeded`, `failed`, `cancelled` | +| `TASK_SM` | Task | (none — done/cancelled can reopen) | - subgraph Backend["Shared Docker Backend"] - Skills["/workspace/skills/
Pre-loaded Skills"] - end +**Transitions** (`transitions.py`): `transition_run()`, `transition_execution()`, `transition_task()` are the **only** functions that modify `.status` on domain entities. `sync_task_from_run()` auto-maps Run terminal status to Task status via `RUN_TO_TASK_SYNC`. + +### 2.5 Observation Layer + +`core/observation/` — OTel-backed tracing injected into `ExecutionContext`. + +| Module | Purpose | +|---|---| +| `collector.py` | `ObservationCollector` — main entry point, injected as `context.collector` | +| `model.py` | Data models for observation spans | +| `types.py` | Type definitions | +| `otel/provider.py` | OTel TracerProvider setup | +| `otel/span_wrapper.py` | Span wrapper with JoySafeter-specific attributes | +| `otel/persistence_processor.py` | Exports spans to DB | +| `otel/broadcast_processor.py` | Exports spans to WebSocket for real-time display | +| `instrumentation/` | Engine-specific extractors: `cli_extractor.py`, `copilot_extractor.py`, `langchain_handler.py`, `file_tracker.py` | + +### 2.6 Ports & Adapters + +`core/ports/execution.py` defines Protocol interfaces that decouple `core/` from `services/`: + +- **`ExecutionEventPort`** — port for publishing execution events through the event bus. Implemented by `services/execution_event_adapter.py`, used by `core/agent/cli_backends/execution_runner.py`. +- **`ExecutionReaderPort`** — port for reading execution data without direct ORM queries in `core/`. Implemented by `services/execution_reader_adapter.py`. + +`EventContext` dataclass carries run-level metadata so event publishing can construct complete envelopes without querying the DB on every event. + +### 2.7 Error System — AppError Hierarchy + ErrorDescriptor + +`common/app_errors.py` defines a unified exception hierarchy rooted at `AppError` (a `@dataclass(slots=True)` subclass of `Exception`). + +**Category classes** (no constructors, just `_default_source`): + +``` +AppError + ├── DomainError (_default_source = "api") + ├── InfraError (_default_source = "runtime") + ├── AuthError (_default_source = "auth") + ├── ValidationError (_default_source = "validation") + ├── PermissionDeniedError(_default_source = "permission") + ├── ConflictError (_default_source = "api") + ├── RateLimitError (_default_source = "api") + └── InternalError (_default_source = "internal") +``` + +**Leaf classes** provide defaults and `**kw` pass-through: - Worker1 --> Backend - Worker2 --> Backend - Worker3 --> Backend - CodeAgent --> Backend - - style Manager fill:#e1f5ff - style Worker1 fill:#fff4e1 - style Worker2 fill:#fff4e1 - style Worker3 fill:#fff4e1 - style CodeAgent fill:#fff4e1 - style Backend fill:#e8f5e8 ``` +DomainError + ├── NotFoundError (code=NOT_FOUND) + ├── InvalidRequestError (code=BAD_REQUEST) + └── ModelConfigError (code=MODEL_*) +AuthError + └── AuthenticationError (code=UNAUTHORIZED, user_action=relogin) +PermissionDeniedError + └── AccessDeniedError (code=FORBIDDEN) +... +``` + +**ErrorDescriptor** — the canonical error payload shape, output by `AppError.to_payload()`: + +```json +{ + "code": "SKILL_NOT_FOUND", + "message": "Skill not found", + "data": {"skill_id": "..."}, + "source": "api", + "retryable": false, + "user_action": null, + "detail": null +} +``` + +This is the **single serialization chokepoint** — all transport paths (HTTP response body, WebSocket error frames, SSE error events, DB JSONB `error` columns) flow through `to_payload()`. 
+ +**Error code registry**: `core/contracts/error.py` contains `ErrorCode` StrEnum with ~180 entries organized by domain (Generic, Auth, Agent, Run, Execution, Engine, Model, Sandbox, Skill, Tool/MCP, Task, etc.). + +### 2.8 Graph Build System + +Two paths for building agent graphs: + +| Path | definition_kind | Engine | Description | +|---|---|---|---| +| **Code Mode** | `code` | CodeEngine | User writes LangGraph Python in browser; backend exec()s in sandbox | +| **DeepAgents Canvas** | `graph` | GraphEngine | Visual drag-and-drop builder; Manager-Worker star topology | +| **CLI-backed** | `claude_code`, `codex`, `openclaw` | CLIEngine | Docker container + CLI agent runtime | +| **Copilot** | (internal) | CopilotEngine | Graph analysis and action execution | **DeepAgents Build Pipeline:** ``` build_deep_agents_graph() - ├── 1. resolve_all_configs() — pure config extraction, no side effects - ├── 2. setup shared backend — Docker sandbox if needed - ├── 3. preload_skills() — batch preload with deduplication - ├── 4. ModelResolver.resolve() — unified LLM resolution with cache - ├── 5. build workers — agent_factory per node type - └── 6. create_deep_agent() — compile and finalize + ├── 1. resolve_all_configs() — pure config extraction, no side effects + ├── 2. setup shared backend — Docker sandbox if needed + ├── 3. preload_skills() — batch preload with deduplication + ├── 4. ModelResolver.resolve() — unified LLM resolution with cache + ├── 5. build workers — agent_factory per node type + └── 6. create_deep_agent() — compile and finalize ``` -**Key Design Decisions:** -- **No inheritance** — composition of dedicated resolvers (ModelResolver, ToolResolver, SkillsLoader) -- **Config resolution is pure** — no side effects, each node resolved exactly once -- **Model resolution is unified and cached** — same resolver for node models and memory models -- **Star Topology**: Manager connects directly to all SubAgents (not chain) -- **Shared Backend**: Docker backend shared across agents for skills and code execution - -#### 3. Code Executor Security +### 2.9 Code Executor Security The code executor runs user LangGraph code with multiple security layers: | Layer | Protection | -|-------|-----------| -| **Builtins blacklist** | `open`, `eval`, `exec`, `compile`, `globals`, `locals`, `vars`, `dir` removed | -| **Import blocklist** | `os`, `sys`, `subprocess`, `socket`, `io`, `pathlib`, etc. blocked | -| **Import allowlist** | Only `langgraph`, `langchain`, `typing`, `json`, `pydantic`, etc. allowed | -| **Exec timeout** | 10 second limit via `signal.alarm` | -| **Invoke timeout** | 30 second limit via `asyncio.wait_for` | -| **Permission checks** | Save requires member role, Run requires viewer role | -| **Error sanitization** | Server file paths stripped from error messages | +|---|---| +| Builtins blacklist | `open`, `eval`, `exec`, `compile`, `globals`, `locals`, `vars`, `dir` removed | +| Import blocklist | `os`, `sys`, `subprocess`, `socket`, `io`, `pathlib`, etc. blocked | +| Import allowlist | Only `langgraph`, `langchain`, `typing`, `json`, `pydantic`, etc. allowed | +| Exec timeout | 10-second limit via `signal.alarm` | +| Invoke timeout | 30-second limit via `asyncio.wait_for` | +| Permission checks | Save requires member role, Run requires viewer role | +| Error sanitization | Server file paths stripped from error messages | -#### 4. 
Skill System (Progressive Disclosure) +### 2.10 Skill System -The skill system implements progressive disclosure to reduce token consumption: +Progressive disclosure to reduce token consumption: -```mermaid -sequenceDiagram - participant Node as Agent Node - participant Loader as SkillSandboxLoader - participant Backend as Docker Backend +- **SkillService**: CRUD with permission control and versioning +- **SkillsLoader**: Batch preloads skills to Docker backend with deduplication +- **FilesystemMiddleware**: Agent reads `/workspace/skills/{skill_name}/SKILL.md` on demand - Node->>Loader: Preload skills (batch, deduplicated) - Loader->>Backend: Write skill files to /workspace/skills/ - Backend-->>Loader: Skills loaded +Skill exceptions inherit from the unified error tree: - Node->>Node: Agent sees skill summaries in system prompt - Node->>Backend: Agent reads /workspace/skills/{skill_name}/SKILL.md - Backend-->>Node: Agent receives full skill content on demand +``` +DomainError → SkillLoadError, SkillNotFoundError (via NotFoundError) +PermissionDeniedError → SkillPermissionDeniedError (via AccessDeniedError) +InternalError → SkillFileWriteError (via InternalServiceError) ``` -**Components:** -- **SkillService**: CRUD operations with permission control -- **SkillsLoader**: Batch preloads skills to Docker backend with deduplication -- **FilesystemMiddleware**: Agent reads skill files from `/workspace/skills/` via filesystem access +### 2.11 Memory System -#### 5. Memory System (Long/Short-term Memory) +Long/short-term agent memory with middleware injection: -```mermaid -sequenceDiagram - participant User as User Input - participant Middleware as MemoryMiddleware - participant Manager as MemoryManager - participant DB as PostgreSQL - participant Agent as Agent - - User->>Middleware: User message - Middleware->>Manager: Retrieve relevant memories - Manager->>DB: Query memories by user_id/topics - DB-->>Manager: Return memories - Manager-->>Middleware: Inject memories into context - Middleware->>Agent: Enhanced prompt with memories - Agent-->>Middleware: Agent response - Middleware->>Manager: Extract and persist new memories - Manager->>DB: Persist memory -``` +- **MemoryManager**: Query and persist memories by user/topics +- **MemoryMiddleware**: Injects relevant memories into agent context, extracts new memories from responses +- **Memory types**: Fact, Procedure, Episodic, Semantic -**Memory Types:** -- **Fact**: Factual knowledge (target info, vulnerabilities) -- **Procedure**: Procedural knowledge (successful attack paths) -- **Episodic**: Session-specific experiences -- **Semantic**: General security knowledge +--- -### Core Workflows +## 3. 
Core Workflows -#### Graph Building Flow +### 3.1 Execution Flow ```mermaid sequenceDiagram - participant Frontend as Frontend + participant FE as Frontend participant API as REST API - participant Service as GraphService - participant Builder as DeepAgentsBuilder / CodeExecutor - participant Runtime as LangGraph Runtime - - Frontend->>API: Save graph (nodes/edges or code) - API->>Service: build graph - Service->>Service: Detect mode (code vs canvas) - - alt Code Mode - Service->>Builder: execute_code(code) - Builder->>Runtime: StateGraph.compile() - else Canvas Mode (DeepAgents) - Service->>Builder: build_deep_agents_graph(nodes, edges) - Builder->>Runtime: create_deep_agent() → compile() + participant DS as DispatchService + participant EO as ExecutionOrchestrator + participant ER as EngineRegistry + participant ENG as ExecutionEngine + participant CTX as ExecutionContext + participant BUS as ExecutionEventBus + participant P1 as Phase 1 Subscribers + participant P2 as Phase 2 Subscribers + participant WS as /ws/executions + + FE->>API: POST /runs or /executions + API->>DS: dispatch(agent_id, prompt, ...) + DS->>EO: create_and_start(...) + EO->>EO: Create AgentRun + Execution rows + EO->>ER: get(runtime_kind) + ER->>ENG: engine.start(context, ...) + + loop Engine execution + ENG->>CTX: context.emit(event_type, payload) + CTX->>BUS: publish(envelope, db) + BUS->>P1: PersistenceSubscriber.handle() [sequential, shared tx] + BUS->>P1: StateTransitionSubscriber.handle() + Note over BUS: COMMIT + BUS->>P2: WebSocketSubscriber.handle() [parallel] + BUS->>P2: TaskSyncSubscriber.handle() [parallel] + P2->>WS: push to subscribed clients end - Runtime-->>Service: CompiledStateGraph - Service-->>API: Compiled graph - API-->>Frontend: Ready + ENG->>CTX: context.complete(status, result, error) + CTX->>BUS: publish completion envelope ``` -#### Graph Execution Flow +### 3.2 Error Flow ```mermaid -sequenceDiagram - participant Frontend as Frontend - participant API as REST API - participant Service as GraphService - participant Runtime as LangGraph Runtime - participant SSE as SSE Stream - - Frontend->>API: POST /api/chat (SSE) - API->>Service: Load and compile graph - Service-->>Runtime: CompiledStateGraph - Service->>Runtime: ainvoke({"messages": [...]}) - - loop Each Node - Runtime->>Runtime: Execute node - Runtime->>SSE: Push event (node_start/node_end) - SSE-->>Frontend: Stream update - end +flowchart LR + ENG["Engine raises
or catches error"] --> APP["AppError
(or normalize_app_error)"] + APP --> TP["to_payload()
→ ErrorDescriptor"] + TP --> HTTP["HTTP JSON response"] + TP --> WSF["WS error frame"] + TP --> DB["DB JSONB
execution.error"] + TP --> ENV["Envelope.error field"] + + style APP fill:#fce4ec + style TP fill:#fff3e0 +``` + +All errors are normalized to `AppError` (or subclass), serialized via the single `to_payload()` method, and consumed identically across all transports. The frontend `ApiError` class mirrors the `ErrorDescriptor` shape with typed `source: ErrorSource`, `retryable: boolean`, and `userAction?: UserAction`. + +--- + +## 4. Data Flow + +### 4.1 WebSocket Endpoints + +| Path | Handler | Purpose | +|---|---|---| +| `/ws/executions` | `ExecutionSubscriptionHandler` | Execution event stream — subscribe, snapshot replay, live events | +| `/ws/notifications` | `NotificationManager` | User-level push notifications | +| `/ws/openclaw/dashboard` | `OpenClawHandler` | OpenClaw dashboard bridge | +| `/ws/openclaw/bridge/{user_id}` | `OpenClawHandler` | OpenClaw device bridge | + +### 4.2 Trigger Sources + +AgentRun creation accepts canonical trigger sources defined in `core/contracts/execution.py`: + +`task` | `chat` | `api` | `scheduler` | `draft_test` | `draft_copilot` | `debug` | `copilot` + +### 4.3 Single Event Source + +All engines emit events through `ExecutionContext.emit()` into `execution_events` table. The `PersistenceSubscriber` assigns monotonically increasing `seq` numbers. WebSocket clients replay from persisted events on reconnect and receive live events from the same pipeline. + +### 4.4 Frontend → Backend Communication + +| Channel | Use | +|---|---| +| REST API (`/api/v1/*`) | CRUD operations: agents, versions, releases, tasks, threads, runs, executions, skills, tools, models, workspaces | +| WebSocket `/ws/executions` | Real-time execution event streaming | +| WebSocket `/ws/notifications` | User notifications | +| Code API | Save and run user LangGraph code | + +### 4.5 Backend → Data Layer + +- **PostgreSQL**: Agent definitions, versions, releases, skills, memories, sessions, workspaces, runs, executions, execution_events, snapshots, traces +- **Redis**: Session cache, rate limiting, temporary data + +--- + +## 5. 
Backend File Structure - Runtime-->>Service: Final result - Service-->>SSE: End event - SSE-->>Frontend: Stream complete +``` +app/ +├── api/v1/ # REST route modules +├── common/ +│ └── app_errors.py # AppError hierarchy + to_payload() + normalize_app_error() +├── core/ +│ ├── contracts/ # Value domain registries (single source of truth) +│ │ ├── agent.py # DefinitionKind, RuntimeKind, DEFINITION_RUNTIME_KIND +│ │ ├── execution.py # RunStatus, ExecutionStatus, TriggerSource, terminal sets +│ │ └── error.py # ErrorCode (StrEnum ~180), ErrorSource, UserAction +│ ├── engine/ # Execution engine abstraction +│ │ ├── protocol.py # ExecutionEngine Protocol, ExecutionContext, EngineCapabilities +│ │ ├── registry.py # EngineRegistry singleton +│ │ ├── __init__.py # Registers 4 built-in engines at import time +│ │ ├── cli_engine.py # CLIEngine (Docker + CLI agent runtime) +│ │ ├── graph_engine.py # GraphEngine (LangGraph compiler) +│ │ ├── code_engine.py # CodeEngine (in-process code agent) +│ │ └── copilot_engine.py # CopilotEngine (graph analysis) +│ ├── events/ # Two-phase event bus +│ │ ├── bus.py # ExecutionEventBus (Phase 1 + Phase 2) +│ │ ├── envelope.py # ExecutionEventEnvelope dataclass +│ │ ├── event_types.py # ExecutionEventType StrEnum +│ │ ├── subscriber.py # EventSubscriber Protocol + SubscriberPhase enum +│ │ └── subscribers/ # Built-in subscriber implementations +│ │ ├── persistence.py # PersistenceSubscriber (Phase 1) +│ │ ├── state_transition.py# StateTransitionSubscriber (Phase 1) +│ │ ├── websocket.py # WebSocketSubscriber (Phase 2) +│ │ └── task_sync.py # TaskSyncSubscriber (Phase 2) +│ ├── state_machines/ # Centralized status transition rules +│ │ ├── definitions.py # Transition tables for 6 entities +│ │ ├── engine.py # StateMachine class + InvalidTransition error +│ │ └── transitions.py # transition_run(), transition_execution(), transition_task() +│ ├── observation/ # OTel-backed tracing +│ │ ├── collector.py # ObservationCollector (injected into ExecutionContext) +│ │ ├── otel/ # TracerProvider, span wrappers, processors +│ │ └── instrumentation/ # Engine-specific extractors +│ ├── ports/ # Protocol interfaces for core/ <-> services/ decoupling +│ │ └── execution.py # ExecutionEventPort, ExecutionReaderPort, EventContext +│ ├── agent/ # CLI agent backends (claude_code, codex, openclaw) +│ ├── copilot/ # Copilot service implementation +│ ├── graph/ # DeepAgents graph builder + code executor +│ ├── skill/ # Skill system (service, loader, exceptions) +│ ├── model/ # Model provider + credential management +│ ├── tools/ # Tool resolver + MCP integration +│ └── a2a/ # Agent-to-agent protocol support +├── models/ # SQLAlchemy ORM models +├── repositories/ # Data access layer +├── schemas/ # Pydantic request/response schemas +├── services/ # Service layer implementations +│ ├── dispatch_service.py # API-facing facade (Layer 1.5) +│ ├── execution_orchestrator.py # Run + Execution lifecycle (Layer 2) +│ ├── execution_event_adapter.py # ExecutionEventPort implementation +│ ├── execution_reader_adapter.py# ExecutionReaderPort implementation +│ ├── runner_factory.py # Creates CLI execution runners +│ ├── agent_service.py # Agent CRUD +│ ├── agent_version_service.py # Version management +│ ├── agent_release_service.py # Release lifecycle +│ ├── agent_run_service.py # Run queries +│ ├── copilot_service.py # Copilot streaming +│ ├── skill_service.py # Skill CRUD + permissions +│ ├── model_service.py # Model resolution (provider_name, model_name) +│ ├── sandbox_manager.py # Sandbox pool 
management +│ └── ... # (40+ service modules) +├── websocket/ # WebSocket handlers +│ ├── execution_subscription_handler.py # /ws/executions handler +│ ├── execution_subscription_manager.py # Subscription registry + broadcast +│ ├── notification_manager.py # /ws/notifications +│ ├── openclaw_handler.py # /ws/openclaw/* handlers +│ └── auth.py # WS authentication +├── templates/ # Email templates (Jinja2) +└── utils/ # Shared utilities ``` -### Data Flow +--- -**Frontend ↔ Backend:** -- **REST API**: Graph configuration, skill management, tool management, workspace operations -- **WebSocket (`/ws/chat`)**: Shared chat protocol for Chat, Copilot, and Skill Creator turns; Copilot sends `extension: { kind: "copilot" }` through the same WS -- **WebSocket (`/ws/runs`)**: Real-time run observation — event replay and status updates for active agent runs -- **Code API**: Save and run user LangGraph code -- **SSE Stream**: Real-time execution status, streaming output, node execution events +## 6. Frontend Architecture -**Backend Internal:** -- **Code Mode**: `code_executor.execute_code()` → `StateGraph.compile()` → `ainvoke()` -- **Canvas Mode**: `build_deep_agents_graph()` → `create_deep_agent()` → `compile()` → `ainvoke()` -- **Copilot Turn**: `execute_copilot_turn()` → `CopilotService._get_copilot_stream()` → events persisted to `agent_run_events` via Run Center -- **LangGraph Runtime → MCP Servers → Tools**: Tool invocation and execution -- **Middleware → Agent → Model**: Request processing pipeline +### 6.1 App Router Structure -**Backend ↔ Data Layer:** -- **PostgreSQL**: Graph configurations, skills, memories, sessions, workspaces, agent runs/events/snapshots (Run Center) -- **Redis**: Cache, rate limiting, temporary data +Next.js App Router with route groups: -### Backend File Structure (Graph Module) +``` +app/ +├── (auth)/ # Auth pages (signin, signup, verify, reset-password) +├── dashboard/ # Dashboard +├── agents/[agentId]/ # Agent detail: edit, versions, releases, tasks, threads +├── executions/[executionId]/ # Execution detail + real-time trace +├── tasks/ # Task management +├── skills/ # Skill marketplace + creator +├── tools/ # Tool management +├── memory/ # Memory management +├── openclaw/ # OpenClaw dashboard +└── settings/ # Models, members, sandboxes, tokens +``` + +### 6.2 WebSocket Client Layer ``` -app/core/graph/ -├── __init__.py # Exports build_deep_agents_graph() -├── deep_agents/ -│ ├── builder.py # Build orchestration (no inheritance) -│ ├── config.py # Pure config extraction -│ ├── model_resolver.py # Unified LLM resolution with cache -│ ├── agent_factory.py # Creates agent/code_agent/a2a workers -│ ├── skills_loader.py # Batch skills preload with dedup -│ ├── tool_resolver.py # Tool name → instance resolution -│ └── middleware.py # Memory middleware -├── node_secrets.py # A2A secret hydration -└── runtime_prompt_template.py # Runtime prompt variable substitution - -app/core/code_executor.py # Sandboxed exec() for Code mode +BaseWsClient (abstract) +├── lifecycle management (connect, disconnect, reconnect) +├── authentication (ws-token) +├── heartbeat + auto-reconnect with backoff +│ +├── ExecutionWsClient /ws/executions +├── NotificationWsClient /ws/notifications +└── (OpenClaw clients) /ws/openclaw/* ``` + +`ExecutionSubscriptionManager` on the frontend subscribes to execution IDs and dispatches incoming events to the appropriate UI stores. 
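+
+A minimal wiring sketch for this layer (hedged: the `connect`/`subscribe`/`onEvent` method names, the import paths, and the store below are illustrative assumptions, not the actual client API):
+
+```typescript
+// Hypothetical consumer of the WS client layer described above.
+import { ExecutionWsClient } from "@/lib/ws/execution-ws-client"; // assumed path
+import { useExecutionTraceStore } from "@/stores/execution-trace"; // assumed store
+
+// Shape mirrors a subset of the documented event envelope fields.
+type EventEnvelope = { execution_id: string; event_type: string; seq: number; payload: unknown };
+
+const executionId = "…"; // an execution UUID obtained from a REST response
+const client = new ExecutionWsClient(); // BaseWsClient supplies ws-token auth, heartbeat, backoff
+
+await client.connect();
+client.subscribe(executionId); // persisted events replay first (ordered by seq), then live events
+client.onEvent((envelope: EventEnvelope) => {
+  // ExecutionSubscriptionManager-style dispatch: route each event to the owning UI store
+  useExecutionTraceStore.getState().applyEvent(envelope);
+});
+```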
+ +### 6.3 State Management + +- **Zustand**: Client-side stores for UI state (execution trace, sidebar, editor) +- **TanStack Query**: Server state with cache invalidation (agents, skills, models, etc.) + +### 6.4 Error Consumption + +The frontend `ApiError` class (`lib/api-client.ts`) mirrors the backend `ErrorDescriptor`: + +```typescript +class ApiError extends Error { + code: string // e.g., "SKILL_NOT_FOUND" + source: ErrorSource // "api" | "engine" | "runtime" | ... + retryable: boolean // drives retry button visibility + userAction?: UserAction // "retry" | "relogin" | "configure_model" | ... +} +``` + +The `source` and `userAction` fields drive UI behavior: `relogin` triggers auth redirect, `retry` shows a retry button, `configure_model` navigates to model settings. + +### 6.5 API Client + +Unified `apiFetch()` in `lib/api-client.ts` handles: +- URL construction (`API_BASE + path`) +- CSRF token injection +- 401 auto-refresh with single-flight deduplication +- Timeout via `AbortController` +- Structured error extraction → `ApiError` diff --git a/docs/ARCHITECTURE_CN.md b/docs/ARCHITECTURE_CN.md index 177b8cb68..54a660003 100644 --- a/docs/ARCHITECTURE_CN.md +++ b/docs/ARCHITECTURE_CN.md @@ -1,268 +1,589 @@ # 架构设计 -## 整体架构 +## 1. 整体架构 -JoySafeter 采用分层架构模式,各层职责清晰: +JoySafeter 采用分层架构,API 表面、编排层、执行引擎、事件管道、实时推送各司其职。 + +``` +Layer 1 API 路由 (app/api/v1/) + WebSocket 处理器 (app/websocket/) +Layer 1.5 DispatchService — 面向 API 的门面 +Layer 2 ExecutionOrchestrator — 创建 Run + Execution,构建 ExecutionContext +Layer 2.5 EngineRegistry — 单例,runtime_kind → ExecutionEngine +Layer 3 执行引擎:CLIEngine / GraphEngine / CodeEngine / CopilotEngine +Layer 3.5 ExecutionContext 回调 → ExecutionEventBus +Layer 4a PersistenceSubscriber + StateTransitionSubscriber(第 1 阶段,共享 DB 事务) +Layer 4b WebSocketSubscriber + TaskSyncSubscriber(第 2 阶段,并行扇出) +Layer 5 ExecutionSubscriptionManager → WebSocket 客户端 (/ws/executions) +``` ```mermaid flowchart TB - subgraph Row1[" "] - direction LR + subgraph L1["Layer 1 — API 表面"] + REST["/v1 REST 端点"] + WS_EXEC["WS /ws/executions"] + WS_NOTIF["WS /ws/notifications"] + WS_CLAW["WS /ws/openclaw/*"] + end - subgraph Frontend["前端层 (Next.js + React)"] - direction TB - Canvas["DeepAgents 画布
ReactFlow"] - CodeEditor["代码编辑器
CodeMirror"] - Trace["执行追踪
SSE Stream"] - Workspace["工作空间管理
RBAC"] - Copilot["Copilot AI
图构建助手"] - end + subgraph L15["Layer 1.5 — 门面"] + DISPATCH["DispatchService"] + end - subgraph API["API 层 (FastAPI)"] - direction TB - REST["REST APIs
Auth/Graphs/Chat/Skills"] - WS["WebSocket
Chat/Copilot/Runs"] - SSE["SSE Stream
实时事件"] - CodeAPI["Code API
保存/运行"] - end + subgraph L2["Layer 2 — 编排"] + ORCH["ExecutionOrchestrator"] + end - subgraph Services["服务层"] - direction TB - GraphSvc["GraphService"] - SkillSvc["SkillService"] - MemorySvc["MemoryService"] - McpSvc["McpClient
Service"] - ToolSvc["ToolService"] - end + subgraph L25["Layer 2.5 — 注册表"] + REG["EngineRegistry"] + end - subgraph Engine["核心引擎"] - direction TB - DeepBuilder["DeepAgents
构建器"] - CodeExec["代码执行器
沙箱 exec()"] - Middleware["中间件系统
Memory"] - SkillSys["技能系统
渐进式加载"] - MemorySys["记忆系统
长/短期记忆"] - end + subgraph L3["Layer 3 — 引擎"] + CLI["CLIEngine
sandbox"] + GRAPH["GraphEngine
graph"] + CODE["CodeEngine
code"] + COPILOT["CopilotEngine
copilot"] end - subgraph Row2[" "] - direction LR + subgraph L35["Layer 3.5 — 事件总线"] + CTX["ExecutionContext.emit()"] + BUS["ExecutionEventBus"] + end - subgraph Runtime["运行时层"] - direction TB - LangGraph["LangGraph Runtime
StateGraph"] - Checkpoint["Checkpointer
状态持久化"] + subgraph L4["Layer 4 — 订阅者"] + direction LR + subgraph Phase1["第 1 阶段(共享事务,顺序执行)"] + PERSIST["PersistenceSubscriber"] + STATE["StateTransitionSubscriber"] end - - subgraph Data["数据层"] - direction TB - PG["PostgreSQL
图/技能/记忆"] - Redis["Redis
缓存/会话"] + subgraph Phase2["第 2 阶段(并行扇出)"] + WS_SUB["WebSocketSubscriber"] + TASK_SUB["TaskSyncSubscriber"] end + end - subgraph MCP["MCP 工具生态"] - direction TB - MCPServers["MCP Servers
200+ 安全工具"] - Tools["工具注册表
统一管理"] - end + subgraph L5["Layer 5 — 投递"] + MGR["ExecutionSubscriptionManager"] + CLIENTS["WebSocket 客户端"] end - Canvas --> REST - CodeEditor --> CodeAPI - Trace --> SSE - Workspace --> REST - Copilot --> WS - - REST --> Services - WS --> Services - SSE --> Services - CodeAPI --> Services - - Services --> Engine - Engine --> Runtime - Runtime --> Data - Runtime --> MCP - - MCPServers --> Tools - - style Row1 fill:transparent,stroke:transparent - style Row2 fill:transparent,stroke:transparent - - style Frontend fill:#e1f5ff - style API fill:#f3e5f5 - style Services fill:#fff3e0 - style Engine fill:#e8f5e8 - style Runtime fill:#fff8e1 - style Data fill:#fce4ec - style MCP fill:#e0f2f1 + REST --> DISPATCH + DISPATCH --> ORCH + ORCH --> REG + REG --> CLI & GRAPH & CODE & COPILOT + CLI & GRAPH & CODE & COPILOT --> CTX + CTX --> BUS + BUS --> PERSIST & STATE + BUS --> WS_SUB & TASK_SUB + WS_SUB --> MGR --> CLIENTS + WS_EXEC --> MGR + + style L1 fill:#f3e5f5 + style L15 fill:#e1f5ff + style L2 fill:#fff3e0 + style L25 fill:#fff3e0 + style L3 fill:#e8f5e8 + style L35 fill:#fff8e1 + style L4 fill:#fce4ec + style L5 fill:#e0f2f1 ``` -### 核心模块 +--- -#### 1. 图构建系统 — 两条路径 +## 2. 核心模块 -系统支持两种图构建模式: +### 2.1 契约(Contracts)— 值域唯一来源 -```mermaid -flowchart LR - Service[GraphService] -->|graph_mode = code| CodeExec[代码执行器
exec → StateGraph.compile] - Service -->|画布模式| DeepBuilder[DeepAgents 构建器
Manager-Worker 拓扑] +`core/contracts/` 下三个契约文件以 `Literal` 类型 + `set[str]` 常量定义所有规范化值。全部代码引用这些定义,不散布魔术字符串。 + +| 契约文件 | 定义内容 | +|---|---| +| `agent.py` | `DefinitionKindLiteral`、`RuntimeKindLiteral`、`DEFINITION_RUNTIME_KIND` 映射、`infer_runtime_kind()` | +| `execution.py` | `RunStatusLiteral`、`ExecutionStatusLiteral`、`TriggerSourceLiteral`、终态/活跃集合 | +| `error.py` | `ErrorCode`(StrEnum,~180 码)、`ErrorSource`、`UserAction`、规范化注册集合 | - CodeExec --> LangGraph[LangGraph Runtime] - DeepBuilder --> LangGraph +### 2.2 引擎协议 + 注册表 + 能力矩阵 - style Service fill:#e1f5ff - style CodeExec fill:#fff3e0 - style DeepBuilder fill:#e8f5e8 +**协议** (`core/engine/protocol.py`): + +```python +@runtime_checkable +class ExecutionEngine(Protocol): + engine_kind: str + capabilities: EngineCapabilities + + async def start(self, context: ExecutionContext, *, ...) -> None: ... + async def cancel(self, execution_id: UUID) -> None: ... + async def send_message(self, execution_id: UUID, message: str) -> None: ... ``` -**Code 模式:** -- 用户在浏览器编辑器中编写标准 LangGraph Python 代码 -- 后端在沙箱环境中执行代码(受限 builtins、import 白名单、执行超时) -- 从执行结果中提取 `StateGraph` 实例,编译并运行 -- 零学习成本 — LangGraph 官方文档就是使用文档 +**ExecutionContext** 在启动时注入每个引擎。引擎不直接接触持久化或 WebSocket —— 只调用 `context.emit()`、`context.update_status()` 和 `context.complete()`。 -**DeepAgents 画布模式:** -- 可视化拖拽构建多智能体编排 -- 三种节点类型:Agent、Code Agent、A2A Agent -- 通过 `deepagents.create_deep_agent()` 构建 Manager-Worker 星型拓扑 +**EngineCapabilities** 声明各引擎支持的能力: -#### 2. DeepAgents 多智能体编排 +**Agent 运行时引擎**(用户面向的 Agent 执行环境): -DeepAgents 实现星型拓扑,一个 Manager 协调多个 Worker: +| 引擎 | runtime_kind | cancel | msg_inject | debug_obs | artifacts | approval | +|---|---|---|---|---|---|---| +| CLIEngine | sandbox | Y | Y | N | Y | Y | +| GraphEngine | graph | Y | N | Y | Y | Y | +| CodeEngine | code | Y | N | Y | N | N | -```mermaid -flowchart TB - Manager[Manager Agent
useDeepAgents=True
DeepAgent] +**内部平台引擎**(复用执行管道的平台工具,非用户面向的 Agent 运行时): - Manager -->|task| Worker1[Worker 1
CompiledSubAgent] - Manager -->|task| Worker2[Worker 2
CompiledSubAgent] - Manager -->|task| Worker3[Worker 3
CompiledSubAgent] - Manager -->|task| CodeAgent[CodeAgent
CompiledSubAgent] +| 引擎 | engine_kind | cancel | msg_inject | debug_obs | artifacts | approval | +|---|---|---|---|---|---|---| +| CopilotEngine | build_copilot | Y | N | N | N | N | - subgraph Backend["共享 Docker 后端"] - Skills["/workspace/skills/
预加载技能"] - end +> CopilotEngine 是 Graph Builder AI 助手,帮助用户在画布上设计 Agent 图。它不是 Agent 运行时——没有任何用户创建的 Agent 以 `build_copilot` 作为 `runtime_kind`。它复用执行管道(Run → Execution → EventBus → WebSocket)进行流式传输和持久化。 + +**注册表** (`core/engine/registry.py`):模块级单例 `engine_registry` 将引擎键映射到引擎实例。引擎在 `core/engine/__init__.py` 导入时自动注册: + +```python +# Agent 运行时引擎(用户面向) +engine_registry.register("sandbox", CLIEngine()) +engine_registry.register("graph", GraphEngine()) +engine_registry.register("code", CodeEngine()) + +# 内部平台引擎(非用户面向的 Agent 运行时) +engine_registry.register("build_copilot", CopilotEngine()) +engine_registry.register("copilot", CopilotEngine()) # 向后兼容已有 DB 记录 +``` + +**添加新 Agent 运行时引擎**需要: +1. 实现 `ExecutionEngine` 协议 +2. 在 `core/engine/__init__.py` 中注册 +3. 在 `core/contracts/agent.py` 中添加新的 `runtime_kind`(`RUNTIME_KINDS`、`DEFINITION_RUNTIME_KIND`) +4. 如需新错误码,在 `core/contracts/error.py` 中添加 + +**添加新内部平台引擎**只需: +1. 实现 `ExecutionEngine` 协议 +2. 在 `core/engine/__init__.py` 中注册 +3. 在 `core/contracts/agent.py` 的 `INTERNAL_ENGINE_KINDS` 中添加 + +### 2.3 两阶段事件总线 + +`core/events/bus.py` — `ExecutionEventBus` + +所有执行事件通过两阶段发布管道流转: + +- **第 1 阶段(PERSIST)**:订阅者共享调用方的 DB 会话,**顺序执行**。所有第 1 阶段订阅者完成后总线统一提交。保证持久化和状态变迁的原子性。 + - `PersistenceSubscriber` — 写入 `ExecutionEvent` 行,分配 `seq` 序号 + - `StateTransitionSubscriber` — 通过状态机校验并执行状态变迁 + +- **第 2 阶段(BROADCAST)**:订阅者通过 `asyncio.gather` **并行执行**。一个失败不影响其他。 + - `WebSocketSubscriber` — 推送事件到 `ExecutionSubscriptionManager` 进行实时投递 + - `TaskSyncSubscriber` — 根据 Run 终态同步 Task 状态 + +**信封** (`core/events/envelope.py`):`ExecutionEventEnvelope` 是所有订阅者接收的规范化数据结构: + +```python +@dataclass +class ExecutionEventEnvelope: + execution_id: UUID + run_id: UUID + workspace_id: UUID + event_type: ExecutionEventType | str + payload: dict[str, Any] + seq: int = 0 # 由 PersistenceSubscriber 填充 + trigger_source: str | None = None + thread_id: UUID | None = None + task_id: UUID | None = None + terminal_status: str | None = None # 仅完成事件 + error: dict[str, Any] | None = None # ErrorDescriptor,通过 AppError.to_payload() + ... 
+``` + +**事件类型** (`core/events/event_types.py`):`ExecutionEventType` StrEnum —— 内容事件(`assistant_text`、`thinking`、`tool_use_start/end`、`error`、`artifact_created`、`approval_requested/resolved`)、生命周期事件(`execution_started/completed/status_change`、`run_status_change`)和 Copilot 事件。 + +### 2.4 状态机 + +`core/state_machines/` 集中管理所有状态转换规则。 + +**引擎** (`engine.py`):通用 `StateMachine` 类,提供 `validate(from, to)` 和 `is_terminal(status)`。 + +**定义** (`definitions.py`):6 个实体的转换表: + +| 状态机 | 实体 | 终态 | +|---|---|---| +| `AGENT_SM` | Agent | (无 — archived 可恢复) | +| `VERSION_SM` | AgentVersion | (无 — frozen 可解冻) | +| `RELEASE_SM` | AgentRelease | `retired` | +| `RUN_SM` | AgentRun | `succeeded`、`failed`、`cancelled` | +| `EXECUTION_SM` | Execution | `succeeded`、`failed`、`cancelled` | +| `TASK_SM` | Task | (无 — done/cancelled 可重新打开) | + +**转换函数** (`transitions.py`):`transition_run()`、`transition_execution()`、`transition_task()` 是**唯一**修改领域实体 `.status` 的函数。`sync_task_from_run()` 通过 `RUN_TO_TASK_SYNC` 自动将 Run 终态映射为 Task 状态。 + +### 2.5 观测层(Observation) + +`core/observation/` — 基于 OTel 的追踪,注入 `ExecutionContext`。 + +| 模块 | 用途 | +|---|---| +| `collector.py` | `ObservationCollector` — 主入口,注入为 `context.collector` | +| `model.py` | 观测 span 数据模型 | +| `types.py` | 类型定义 | +| `otel/provider.py` | OTel TracerProvider 设置 | +| `otel/span_wrapper.py` | Span 包装器,附加 JoySafeter 专属属性 | +| `otel/persistence_processor.py` | 将 span 导出到 DB | +| `otel/broadcast_processor.py` | 将 span 导出到 WebSocket 实时展示 | +| `instrumentation/` | 引擎专属提取器:`cli_extractor.py`、`copilot_extractor.py`、`langchain_handler.py`、`file_tracker.py` | + +### 2.6 端口与适配器(Ports & Adapters) + +`core/ports/execution.py` 定义 Protocol 接口,解耦 `core/` 和 `services/`: + +- **`ExecutionEventPort`** — 通过事件总线发布执行事件的端口。由 `services/execution_event_adapter.py` 实现,`core/agent/cli_backends/execution_runner.py` 使用。 +- **`ExecutionReaderPort`** — 在 `core/` 中读取执行数据,无需直接 ORM 查询。由 `services/execution_reader_adapter.py` 实现。 + +`EventContext` 数据类携带 Run 级元数据,使事件发布无需每次查询 DB 即可构建完整信封。 + +### 2.7 错误系统 — AppError 层次结构 + ErrorDescriptor + +`common/app_errors.py` 定义了以 `AppError`(`@dataclass(slots=True)` + `Exception` 子类)为根的统一异常层次。 + +**分类类**(无构造函数,仅 `_default_source`): + +``` +AppError + ├── DomainError (_default_source = "api") + ├── InfraError (_default_source = "runtime") + ├── AuthError (_default_source = "auth") + ├── ValidationError (_default_source = "validation") + ├── PermissionDeniedError(_default_source = "permission") + ├── ConflictError (_default_source = "api") + ├── RateLimitError (_default_source = "api") + └── InternalError (_default_source = "internal") +``` + +**叶子类**提供默认值并使用 `**kw` 透传: + +``` +DomainError + ├── NotFoundError (code=NOT_FOUND) + ├── InvalidRequestError (code=BAD_REQUEST) + └── ModelConfigError (code=MODEL_*) +AuthError + └── AuthenticationError (code=UNAUTHORIZED, user_action=relogin) +PermissionDeniedError + └── AccessDeniedError (code=FORBIDDEN) +... 
+``` - Worker1 --> Backend - Worker2 --> Backend - Worker3 --> Backend - CodeAgent --> Backend - - style Manager fill:#e1f5ff - style Worker1 fill:#fff4e1 - style Worker2 fill:#fff4e1 - style Worker3 fill:#fff4e1 - style CodeAgent fill:#fff4e1 - style Backend fill:#e8f5e8 +**ErrorDescriptor** — 规范化错误载荷,由 `AppError.to_payload()` 输出: + +```json +{ + "code": "SKILL_NOT_FOUND", + "message": "技能未找到", + "data": {"skill_id": "..."}, + "source": "api", + "retryable": false, + "user_action": null, + "detail": null +} ``` +这是**唯一的序列化出口** —— 所有传输路径(HTTP 响应体、WebSocket 错误帧、SSE 错误事件、DB JSONB `error` 列)均通过 `to_payload()` 流转。 + +**错误码注册表**:`core/contracts/error.py` 包含 `ErrorCode` StrEnum,约 180 个条目,按领域分组(Generic、Auth、Agent、Run、Execution、Engine、Model、Sandbox、Skill、Tool/MCP、Task 等)。 + +### 2.8 图构建系统 + +两条路径构建 Agent 图: + +| 路径 | definition_kind | 引擎 | 说明 | +|---|---|---|---| +| **Code 模式** | `code` | CodeEngine | 用户在浏览器写 LangGraph Python;后端沙箱 exec() | +| **DeepAgents 画布** | `graph` | GraphEngine | 可视化拖拽;Manager-Worker 星型拓扑 | +| **CLI-backed** | `claude_code`、`codex`、`openclaw` | CLIEngine | Docker 容器 + CLI agent 运行时 | +| **Copilot** | (内部) | CopilotEngine | 图分析与动作执行 | + **DeepAgents 构建流水线:** ``` build_deep_agents_graph() - ├── 1. resolve_all_configs() — 纯配置提取,无副作用 - ├── 2. 初始化共享后端 — 按需创建 Docker 沙箱 - ├── 3. preload_skills() — 批量预加载,自动去重 - ├── 4. ModelResolver.resolve() — 统一 LLM 解析,带缓存 - ├── 5. 构建 Worker — agent_factory 按节点类型创建 - └── 6. create_deep_agent() — 编译并最终化 + ├── 1. resolve_all_configs() — 纯配置提取,无副作用 + ├── 2. 初始化共享后端 — 按需创建 Docker 沙箱 + ├── 3. preload_skills() — 批量预加载,自动去重 + ├── 4. ModelResolver.resolve() — 统一 LLM 解析,带缓存 + ├── 5. 构建 Worker — agent_factory 按节点类型创建 + └── 6. create_deep_agent() — 编译并最终化 ``` -**关键设计决策:** -- **无继承** — 使用专用解析器组合(ModelResolver、ToolResolver、SkillsLoader) -- **配置解析是纯函数** — 无副作用,每个节点只解析一次 -- **模型解析统一且带缓存** — 节点模型和记忆模型共用同一个解析器 -- **星型拓扑**:Manager 直接连接所有 SubAgent(非链式) -- **共享后端**:Docker 后端在所有 Agent 间共享,用于技能和代码执行 - -#### 3. 代码执行器安全 +### 2.9 代码执行器安全 代码执行器通过多层安全机制运行用户 LangGraph 代码: | 安全层 | 保护措施 | -|--------|---------| -| **Builtins 黑名单** | 移除 `open`、`eval`、`exec`、`compile`、`globals`、`locals`、`vars`、`dir` | -| **Import 黑名单** | 封锁 `os`、`sys`、`subprocess`、`socket`、`io`、`pathlib` 等 | -| **Import 白名单** | 仅允许 `langgraph`、`langchain`、`typing`、`json`、`pydantic` 等 | -| **执行超时** | exec 10 秒限制(`signal.alarm`) | -| **调用超时** | ainvoke 30 秒限制(`asyncio.wait_for`) | -| **权限检查** | 保存需要 member 角色,运行需要 viewer 角色 | -| **错误脱敏** | 从错误信息中移除服务器文件路径 | +|---|---| +| Builtins 黑名单 | 移除 `open`、`eval`、`exec`、`compile`、`globals`、`locals`、`vars`、`dir` | +| Import 黑名单 | 封锁 `os`、`sys`、`subprocess`、`socket`、`io`、`pathlib` 等 | +| Import 白名单 | 仅允许 `langgraph`、`langchain`、`typing`、`json`、`pydantic` 等 | +| 执行超时 | exec 10 秒限制(`signal.alarm`) | +| 调用超时 | ainvoke 30 秒限制(`asyncio.wait_for`) | +| 权限检查 | 保存需要 member 角色,运行需要 viewer 角色 | +| 错误脱敏 | 从错误信息中移除服务器文件路径 | -#### 4. 
技能系统(渐进式加载) +### 2.10 技能系统 -```mermaid -sequenceDiagram - participant Node as Agent 节点 - participant Loader as SkillSandboxLoader - participant Backend as Docker 后端 +渐进式加载,减少 token 消耗: - Node->>Loader: 预加载技能(批量,去重) - Loader->>Backend: 写入技能文件到 /workspace/skills/ - Backend-->>Loader: 技能加载完成 +- **SkillService**:CRUD + 权限控制 + 版本管理 +- **SkillsLoader**:批量预加载到 Docker 后端,自动去重 +- **FilesystemMiddleware**:Agent 按需读取 `/workspace/skills/{skill_name}/SKILL.md` - Node->>Node: Agent 在系统提示中看到技能摘要 - Node->>Backend: Agent 按需读取 /workspace/skills/{skill_name}/SKILL.md - Backend-->>Node: Agent 获取完整技能内容 +技能异常继承自统一错误树: + +``` +DomainError → SkillLoadError、SkillNotFoundError(经 NotFoundError) +PermissionDeniedError → SkillPermissionDeniedError(经 AccessDeniedError) +InternalError → SkillFileWriteError(经 InternalServiceError) ``` -#### 5. 记忆系统(长/短期记忆) +### 2.11 记忆系统 + +长/短期 Agent 记忆,中间件注入: + +- **MemoryManager**:按用户/主题查询和持久化记忆 +- **MemoryMiddleware**:将相关记忆注入 Agent 上下文,从响应中提取新记忆 +- **记忆类型**:事实(Fact)、程序(Procedure)、情景(Episodic)、语义(Semantic) + +--- + +## 3. 核心工作流 + +### 3.1 执行流 ```mermaid sequenceDiagram - participant User as 用户输入 - participant Middleware as MemoryMiddleware - participant Manager as MemoryManager - participant DB as PostgreSQL - participant Agent as Agent - - User->>Middleware: 用户消息 - Middleware->>Manager: 检索相关记忆 - Manager->>DB: 按 user_id/主题查询 - DB-->>Manager: 返回记忆 - Manager-->>Middleware: 注入记忆到上下文 - Middleware->>Agent: 增强后的提示 - Agent-->>Middleware: Agent 响应 - Middleware->>Manager: 提取并持久化新记忆 - Manager->>DB: 保存记忆 + participant FE as 前端 + participant API as REST API + participant DS as DispatchService + participant EO as ExecutionOrchestrator + participant ER as EngineRegistry + participant ENG as ExecutionEngine + participant CTX as ExecutionContext + participant BUS as ExecutionEventBus + participant P1 as 第 1 阶段订阅者 + participant P2 as 第 2 阶段订阅者 + participant WS as /ws/executions + + FE->>API: POST /runs 或 /executions + API->>DS: dispatch(agent_id, prompt, ...) + DS->>EO: create_and_start(...) + EO->>EO: 创建 AgentRun + Execution 行 + EO->>ER: get(runtime_kind) + ER->>ENG: engine.start(context, ...) + + loop 引擎执行过程 + ENG->>CTX: context.emit(event_type, payload) + CTX->>BUS: publish(envelope, db) + BUS->>P1: PersistenceSubscriber.handle()(顺序,共享事务) + BUS->>P1: StateTransitionSubscriber.handle() + Note over BUS: COMMIT + BUS->>P2: WebSocketSubscriber.handle()(并行) + BUS->>P2: TaskSyncSubscriber.handle()(并行) + P2->>WS: 推送给已订阅的客户端 + end + + ENG->>CTX: context.complete(status, result, error) + CTX->>BUS: 发布完成信封 +``` + +### 3.2 错误流 + +```mermaid +flowchart LR + ENG["引擎抛出
或捕获错误"] --> APP["AppError
(或 normalize_app_error)"] + APP --> TP["to_payload()
→ ErrorDescriptor"] + TP --> HTTP["HTTP JSON 响应"] + TP --> WSF["WS 错误帧"] + TP --> DB["DB JSONB
execution.error"] + TP --> ENV["Envelope.error 字段"] + + style APP fill:#fce4ec + style TP fill:#fff3e0 ``` -### 数据流 +所有错误统一规范化为 `AppError`(或子类),通过唯一的 `to_payload()` 方法序列化,在所有传输路径中一致消费。前端 `ApiError` 类镜像 `ErrorDescriptor` 形状,提供类型化的 `source: ErrorSource`、`retryable: boolean` 和 `userAction?: UserAction`。 + +--- + +## 4. 数据流 + +### 4.1 WebSocket 端点 + +| 路径 | 处理器 | 用途 | +|---|---|---| +| `/ws/executions` | `ExecutionSubscriptionHandler` | 执行事件流 — 订阅、快照回放、实时事件 | +| `/ws/notifications` | `NotificationManager` | 用户级推送通知 | +| `/ws/openclaw/dashboard` | `OpenClawHandler` | OpenClaw 看板桥接 | +| `/ws/openclaw/bridge/{user_id}` | `OpenClawHandler` | OpenClaw 设备桥接 | + +### 4.2 触发来源 -**前端 ↔ 后端:** -- **REST API**:图配置、技能管理、工具管理、工作空间操作 -- **WebSocket (`/ws/chat`)**:共享聊天协议,用于 Chat、Copilot 和 Skill Creator 会话;Copilot 通过 `extension: { kind: "copilot" }` 复用同一 WS 连接 -- **WebSocket (`/ws/runs`)**:实时运行观测 — 活跃 agent run 的事件回放和状态更新 -- **Code API**:保存和运行用户 LangGraph 代码 -- **SSE Stream**:实时执行状态、流式输出、节点执行事件 +AgentRun 创建接受 `core/contracts/execution.py` 中定义的规范化触发来源: -**后端内部:** -- **Code 模式**:`code_executor.execute_code()` → `StateGraph.compile()` → `ainvoke()` -- **画布模式**:`build_deep_agents_graph()` → `create_deep_agent()` → `compile()` → `ainvoke()` -- **Copilot 回合**:`execute_copilot_turn()` → `CopilotService._get_copilot_stream()` → 事件通过 Run Center 持久化到 `agent_run_events` -- **LangGraph Runtime → MCP Servers → Tools**:工具调用和执行 -- **Middleware → Agent → Model**:请求处理管道 +`task` | `chat` | `api` | `scheduler` | `draft_test` | `draft_copilot` | `debug` | `copilot` -**后端 ↔ 数据层:** -- **PostgreSQL**:图配置、技能、记忆、会话、工作空间、agent runs/events/snapshots(Run Center) -- **Redis**:缓存、限流、临时数据 +### 4.3 单一事件源 -### 后端文件结构(图模块) +所有引擎通过 `ExecutionContext.emit()` 将事件写入 `execution_events` 表。`PersistenceSubscriber` 分配单调递增的 `seq` 序号。WebSocket 客户端重连时从已持久化的事件回放,并从同一管道接收实时事件。 +### 4.4 前端 → 后端通信 + +| 通道 | 用途 | +|---|---| +| REST API (`/api/v1/*`) | CRUD 操作:agents、versions、releases、tasks、threads、runs、executions、skills、tools、models、workspaces | +| WebSocket `/ws/executions` | 实时执行事件流 | +| WebSocket `/ws/notifications` | 用户通知 | +| Code API | 保存和运行用户 LangGraph 代码 | + +### 4.5 后端 → 数据层 + +- **PostgreSQL**:Agent 定义、版本、发布、技能、记忆、会话、工作空间、runs、executions、execution_events、snapshots、traces +- **Redis**:会话缓存、限流、临时数据 + +--- + +## 5. 
后端文件结构 + +``` +app/ +├── api/v1/ # REST 路由模块 +├── common/ +│ └── app_errors.py # AppError 层次结构 + to_payload() + normalize_app_error() +├── core/ +│ ├── contracts/ # 值域注册表(唯一来源) +│ │ ├── agent.py # DefinitionKind、RuntimeKind、DEFINITION_RUNTIME_KIND +│ │ ├── execution.py # RunStatus、ExecutionStatus、TriggerSource、终态集合 +│ │ └── error.py # ErrorCode(StrEnum ~180)、ErrorSource、UserAction +│ ├── engine/ # 执行引擎抽象 +│ │ ├── protocol.py # ExecutionEngine Protocol、ExecutionContext、EngineCapabilities +│ │ ├── registry.py # EngineRegistry 单例 +│ │ ├── __init__.py # 导入时注册 4 个内建引擎 +│ │ ├── cli_engine.py # CLIEngine(Docker + CLI agent 运行时) +│ │ ├── graph_engine.py # GraphEngine(LangGraph 编译器) +│ │ ├── code_engine.py # CodeEngine(进程内代码 agent) +│ │ └── copilot_engine.py # CopilotEngine(图分析) +│ ├── events/ # 两阶段事件总线 +│ │ ├── bus.py # ExecutionEventBus(第 1 + 第 2 阶段) +│ │ ├── envelope.py # ExecutionEventEnvelope 数据类 +│ │ ├── event_types.py # ExecutionEventType StrEnum +│ │ ├── subscriber.py # EventSubscriber Protocol + SubscriberPhase 枚举 +│ │ └── subscribers/ # 内建订阅者实现 +│ │ ├── persistence.py # PersistenceSubscriber(第 1 阶段) +│ │ ├── state_transition.py# StateTransitionSubscriber(第 1 阶段) +│ │ ├── websocket.py # WebSocketSubscriber(第 2 阶段) +│ │ └── task_sync.py # TaskSyncSubscriber(第 2 阶段) +│ ├── state_machines/ # 集中化状态转换规则 +│ │ ├── definitions.py # 6 个实体的转换表 +│ │ ├── engine.py # StateMachine 类 + InvalidTransition 错误 +│ │ └── transitions.py # transition_run()、transition_execution()、transition_task() +│ ├── observation/ # 基于 OTel 的追踪 +│ │ ├── collector.py # ObservationCollector(注入 ExecutionContext) +│ │ ├── otel/ # TracerProvider、span 包装器、处理器 +│ │ └── instrumentation/ # 引擎专属提取器 +│ ├── ports/ # Protocol 接口,解耦 core/ <-> services/ +│ │ └── execution.py # ExecutionEventPort、ExecutionReaderPort、EventContext +│ ├── agent/ # CLI agent 后端(claude_code、codex、openclaw) +│ ├── copilot/ # Copilot 服务实现 +│ ├── graph/ # DeepAgents 图构建器 + 代码执行器 +│ ├── skill/ # 技能系统(服务、加载器、异常) +│ ├── model/ # 模型提供商 + 凭据管理 +│ ├── tools/ # 工具解析器 + MCP 集成 +│ └── a2a/ # Agent-to-Agent 协议支持 +├── models/ # SQLAlchemy ORM 模型 +├── repositories/ # 数据访问层 +├── schemas/ # Pydantic 请求/响应 Schema +├── services/ # 服务层实现 +│ ├── dispatch_service.py # 面向 API 的门面(Layer 1.5) +│ ├── execution_orchestrator.py # Run + Execution 生命周期(Layer 2) +│ ├── execution_event_adapter.py # ExecutionEventPort 实现 +│ ├── execution_reader_adapter.py# ExecutionReaderPort 实现 +│ ├── runner_factory.py # 创建 CLI 执行 runner +│ ├── agent_service.py # Agent CRUD +│ ├── agent_version_service.py # 版本管理 +│ ├── agent_release_service.py # 发布生命周期 +│ ├── agent_run_service.py # Run 查询 +│ ├── copilot_service.py # Copilot 流式处理 +│ ├── skill_service.py # 技能 CRUD + 权限 +│ ├── model_service.py # 模型解析(provider_name, model_name) +│ ├── sandbox_manager.py # 沙箱池管理 +│ └── ... 
# (40+ 服务模块) +├── websocket/ # WebSocket 处理器 +│ ├── execution_subscription_handler.py # /ws/executions 处理器 +│ ├── execution_subscription_manager.py # 订阅注册 + 广播 +│ ├── notification_manager.py # /ws/notifications +│ ├── openclaw_handler.py # /ws/openclaw/* 处理器 +│ └── auth.py # WS 认证 +├── templates/ # 邮件模板(Jinja2) +└── utils/ # 共享工具 ``` -app/core/graph/ -├── __init__.py # 导出 build_deep_agents_graph() -├── deep_agents/ -│ ├── builder.py # 构建编排(无继承) -│ ├── config.py # 纯配置提取 -│ ├── model_resolver.py # 统一 LLM 解析,带缓存 -│ ├── agent_factory.py # 创建 agent/code_agent/a2a worker -│ ├── skills_loader.py # 批量技能预加载,去重 -│ ├── tool_resolver.py # 工具名 → 实例解析 -│ └── middleware.py # Memory 中间件 -├── node_secrets.py # A2A secret 处理 -└── runtime_prompt_template.py # 运行时 prompt 变量替换 - -app/core/code_executor.py # Code 模式沙箱执行 + +--- + +## 6. 前端架构 + +### 6.1 App Router 路由结构 + +Next.js App Router + 路由分组: + +``` +app/ +├── (auth)/ # 认证页面(登录、注册、验证、重置密码) +├── dashboard/ # 仪表盘 +├── agents/[agentId]/ # Agent 详情:编辑、版本、发布、任务、会话 +├── executions/[executionId]/ # 执行详情 + 实时追踪 +├── tasks/ # 任务管理 +├── skills/ # 技能市场 + 创建器 +├── tools/ # 工具管理 +├── memory/ # 记忆管理 +├── openclaw/ # OpenClaw 看板 +└── settings/ # 模型、成员、沙箱、Token ``` + +### 6.2 WebSocket 客户端层 + +``` +BaseWsClient(抽象基类) +├── 生命周期管理(连接、断开、重连) +├── 认证(ws-token) +├── 心跳 + 指数退避自动重连 +│ +├── ExecutionWsClient /ws/executions +├── NotificationWsClient /ws/notifications +└── (OpenClaw 客户端) /ws/openclaw/* +``` + +前端 `ExecutionSubscriptionManager` 订阅执行 ID,将收到的事件分发到对应的 UI Store。 + +### 6.3 状态管理 + +- **Zustand**:客户端 Store,管理 UI 状态(执行追踪、侧边栏、编辑器) +- **TanStack Query**:服务端状态 + 缓存失效(agents、skills、models 等) + +### 6.4 错误消费 + +前端 `ApiError` 类(`lib/api-client.ts`)镜像后端 `ErrorDescriptor`: + +```typescript +class ApiError extends Error { + code: string // 如 "SKILL_NOT_FOUND" + source: ErrorSource // "api" | "engine" | "runtime" | ... + retryable: boolean // 控制重试按钮可见性 + userAction?: UserAction // "retry" | "relogin" | "configure_model" | ... 
+} +``` + +`source` 和 `userAction` 字段驱动 UI 行为:`relogin` 触发认证重定向,`retry` 显示重试按钮,`configure_model` 跳转到模型设置。 + +### 6.5 API 客户端 + +`lib/api-client.ts` 中的统一 `apiFetch()` 处理: +- URL 构建(`API_BASE + path`) +- CSRF Token 注入 +- 401 自动刷新 + 单航班去重 +- 基于 `AbortController` 的超时 +- 结构化错误提取 → `ApiError` diff --git a/docs/architecture-diagram.drawio b/docs/architecture-diagram.drawio index ad8942a29..af6853c5d 100644 --- a/docs/architecture-diagram.drawio +++ b/docs/architecture-diagram.drawio @@ -28,7 +28,7 @@ - + @@ -37,14 +37,14 @@ - + - + @@ -85,8 +85,8 @@ - - + + @@ -100,11 +100,11 @@ - - + + - + @@ -130,7 +130,7 @@ - + @@ -143,7 +143,7 @@ - + diff --git a/docs/architecture-diagram.mmd b/docs/architecture-diagram.mmd index c74e3e6e5..ea966d54e 100644 --- a/docs/architecture-diagram.mmd +++ b/docs/architecture-diagram.mmd @@ -42,49 +42,89 @@ graph TB subgraph WS_LAYER["Unified WebSocket Layer"] BASE_WS["BaseWsClient\n(lifecycle · auth · reconnect)"] - CHAT_WS["ChatWsClient\n/ws/chat"] - RUN_WS["RunWsClient\n/ws/runs"] + EXEC_WS["ExecutionWsClient\n/ws/executions"] NOTIF_WS["NotificationWsClient\n/ws/notifications"] end + + subgraph FE_ERROR["Error Consumption"] + API_ERROR["ApiError\nErrorDescriptor mirror\nsource · retryable · userAction"] + end end subgraph BACKEND["🐍 Backend — FastAPI · Python 3.12+"] direction TB subgraph API["REST + WebSocket API"] - REST["/v1 REST Endpoints\nChat · Runs · Graphs · Skills\nModels · Tools · Workspaces"] - WS_HANDLER["ChatWsHandler\nUnified WS Protocol\nchat · copilot · skill_creator"] - RUN_WS_HANDLER["RunWsHandler\nEvent Streaming\n& Replay"] + REST["/v1 REST Endpoints\nAgents · Versions · Releases\nTasks · Threads · Runs · Executions"] + EXEC_WS_HANDLER["ExecutionSubscriptionHandler\n/ws/executions\nSnapshot · Events · Replay"] + NOTIF_HANDLER["NotificationManager\n/ws/notifications"] + CLAW_HANDLER["OpenClawHandler\n/ws/openclaw/*"] + end + + subgraph CONTRACTS["Contracts — Single Source of Truth"] + AGENT_CONTRACT["agent.py\nDefinitionKind · RuntimeKind\nDEFINITION_RUNTIME_KIND"] + EXEC_CONTRACT["execution.py\nRunStatus · ExecutionStatus\nTriggerSource · terminal sets"] + ERROR_CONTRACT["error.py\nErrorCode StrEnum (~180)\nErrorSource · UserAction"] end subgraph SERVICES["Service Layer"] + DISPATCH_SVC["DispatchService\nAPI-facing facade (L1.5)"] + ORCH_SVC["ExecutionOrchestrator\nRun + Execution lifecycle (L2)"] + ENGINE_REG["EngineRegistry\nruntime_kind → engine (L2.5)"] MODEL_SVC["ModelService\nUnified Resolution\n(provider_name, model_name)"] - RUN_SVC["RunService\nEvent Sourcing\nRun · Event · Snapshot"] SANDBOX_SVC["SandboxService\nRAII Handle\nPer-User Isolation"] SKILL_SVC["SkillService\nVersioning\n& Collaboration"] - GRAPH_SVC["GraphService\nTemplate · Deploy\n& Lookup"] + end + + subgraph ENGINES["Execution Engines (L3)"] + CLI_ENG["CLIEngine\nruntime_kind=sandbox\nDocker + CLI agent"] + GRAPH_ENG["GraphEngine\nruntime_kind=graph\nLangGraph compiler"] + CODE_ENG["CodeEngine\nruntime_kind=code\nIn-process agent"] + COPILOT_ENG["CopilotEngine\nruntime_kind=copilot\nGraph analysis"] + end + + subgraph EVENT_BUS["Two-Phase Event Bus (L3.5–L4)"] + CTX["ExecutionContext.emit()"] + BUS["ExecutionEventBus"] + subgraph PH1["Phase 1 (shared tx, sequential)"] + PERSIST_SUB["PersistenceSubscriber\nExecutionEvent rows + seq"] + STATE_SUB["StateTransitionSubscriber\nState machine validation"] + end + subgraph PH2["Phase 2 (parallel fan-out)"] + WS_SUB["WebSocketSubscriber\n→ SubscriptionManager"] + TASK_SUB["TaskSyncSubscriber\nRun→Task status sync"] + end + end + 
+ subgraph STATE_MACHINES["State Machines"] + SM["StateMachine engine\n6 entities:\nAgent · Version · Release\nRun · Execution · Task"] + end + + subgraph OBSERVATION["Observation Layer"] + COLLECTOR["ObservationCollector\nOTel-backed tracing\ninjected into ExecutionContext"] + end + + subgraph PORTS["Ports & Adapters"] + EVENT_PORT["ExecutionEventPort\n→ execution_event_adapter"] + READER_PORT["ExecutionReaderPort\n→ execution_reader_adapter"] + end + + subgraph APP_ERRORS["Error System"] + APPEXC["AppError hierarchy\nto_payload() → ErrorDescriptor\n{code,message,data,source,retryable}"] end subgraph CORE["Core Engine"] - DEEP_AGENTS["DeepAgents v0.4\nManager-Worker\nOrchestration"] + DEEP_AGENTS["DeepAgents\nManager-Worker\nOrchestration"] LANGGRAPH["LangGraph Engine\nStateful Graphs\nPause · Resume · Branch"] - COPILOT["Copilot Engine\nGraph Analysis\n& Action Execution"] MODEL_FACTORY["ModelFactory\nProvider Registry\n& Credential Validation"] TOOL_RESOLVER["Tool Resolver\nMCP Protocol\n200+ Tools"] SKILL_LOADER["Skill Loader\nBatch Preload\n& Deduplication"] end - - subgraph OBSERVABILITY["Observability & Cross-Cutting"] - TRACE["trace_id Propagation\n(contextvars)"] - LANGFUSE["Langfuse\nReal-Time Tracing"] - LOGURU["Loguru\nStructured Logging"] - APPEXC["AppException\nStructured Error Codes\ni18n Support"] - end end subgraph DATA["🗄️ Data Layer"] direction LR - PG["PostgreSQL\nGraphs · Runs · Events\nSnapshots · Skills\nCredentials · Memory"] + PG["PostgreSQL\nAgent Definitions · Versions · Releases\nRuns · Executions · Execution Events\nSnapshots · Skills · Credentials · Memory"] REDIS["Redis\nSession Cache\nRate Limiting"] end @@ -102,48 +142,63 @@ graph TB USER --> FRONTEND %% WebSocket hierarchy - BASE_WS --> CHAT_WS - BASE_WS --> RUN_WS + BASE_WS --> EXEC_WS BASE_WS --> NOTIF_WS %% Frontend → Backend - CHAT_WS -->|"ws-token auth"| WS_HANDLER - RUN_WS -->|"event subscribe"| RUN_WS_HANDLER + EXEC_WS -->|"execution subscribe"| EXEC_WS_HANDLER + NOTIF_WS -->|"notifications"| NOTIF_HANDLER TANSTACK -->|"REST"| REST + API_ERROR -.->|"mirrors"| APPEXC %% API → Services - WS_HANDLER --> RUN_SVC - WS_HANDLER --> MODEL_SVC - RUN_WS_HANDLER --> RUN_SVC - REST --> GRAPH_SVC + REST --> DISPATCH_SVC REST --> SKILL_SVC REST --> MODEL_SVC REST --> SANDBOX_SVC - %% Services → Core - MODEL_SVC --> MODEL_FACTORY - RUN_SVC --> DEEP_AGENTS - RUN_SVC --> LANGGRAPH - GRAPH_SVC --> COPILOT - SKILL_SVC --> SKILL_LOADER + %% Services → Engines + DISPATCH_SVC --> ORCH_SVC + ORCH_SVC --> ENGINE_REG + ENGINE_REG --> CLI_ENG + ENGINE_REG --> GRAPH_ENG + ENGINE_REG --> CODE_ENG + ENGINE_REG --> COPILOT_ENG + + %% Engines → Event Bus + CLI_ENG --> CTX + GRAPH_ENG --> CTX + CODE_ENG --> CTX + COPILOT_ENG --> CTX + CTX --> BUS + BUS --> PERSIST_SUB + BUS --> STATE_SUB + BUS --> WS_SUB + BUS --> TASK_SUB + + %% State machines + STATE_SUB --> SM + + %% Observation + COLLECTOR -.->|"injected into"| CTX + + %% Ports + EVENT_PORT -.-> BUS + CLI_ENG -.-> EVENT_PORT %% Core internals + GRAPH_ENG --> DEEP_AGENTS DEEP_AGENTS --> LANGGRAPH DEEP_AGENTS --> TOOL_RESOLVER DEEP_AGENTS --> SKILL_LOADER - COPILOT --> LANGGRAPH + COPILOT_ENG --> LANGGRAPH TOOL_RESOLVER --> MCP_TOOLS - - %% Observability - TRACE -.->|"flows through"| WS_HANDLER - TRACE -.->|"flows through"| LANGGRAPH - TRACE -.->|"persisted in"| PG - LANGFUSE -.-> DEEP_AGENTS - LOGURU -.-> SERVICES + MODEL_SVC --> MODEL_FACTORY %% Data SERVICES --> PG SERVICES --> REDIS + PERSIST_SUB --> PG %% Sandbox SANDBOX_SVC -->|"RAII handle"| DOCKER diff --git 
a/docs/schemas/README.md b/docs/schemas/README.md deleted file mode 100644 index b71e50e8d..000000000 --- a/docs/schemas/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Schemas - -## copilot-contract.json - -Canonical JSON Schema for Copilot WebSocket/SSE stream events and GraphAction. - -- **Source of truth**: `backend/app/core/copilot/action_types.py` (Pydantic models). -- **Regenerate**: From repo root run: - ```bash - python backend/scripts/export_copilot_schema.py - ``` -- **Consumers**: Frontend types in `frontend/types/copilot.ts` and `frontend/app/workspace/[workspaceId]/[agentId]/hooks/useCopilotWebSocketHandler.ts` are kept in sync with this schema; when adding or changing event/action fields, update the backend models, re-run the script, then update the frontend types. - -## copilot-apply-fixtures.json - -Shared test cases for the apply-actions contract: given `(initial_nodes, initial_edges, actions)`, both backend and frontend must produce the same `(expected_nodes, expected_edges)` (contract match: id/type/position/data.label/data.type/config superset for nodes; id/source/target for edges). - -- **Backend test**: `backend/tests/core/copilot/test_action_applier.py` -- **Frontend test**: `frontend/lib/utils/copilot/__tests__/actionProcessor.contract.test.ts` -- When changing apply logic (e.g. new action type or edge case), add or update a case in this file and run both tests. - -## Node type default config (contract) - -- **Source of truth for defaults**: Frontend `frontend/app/workspace/[workspaceId]/[agentId]/services/nodeRegistry.tsx` (defaultConfig and label per type). -- **Backend**: `backend/app/core/copilot/action_applier.py` keeps `NODE_DEFAULT_CONFIGS` and `NODE_LABELS` in sync with nodeRegistry when adding or changing node types; no separate JSON is used. - -## Type and naming conventions - -- **Domain / API contract types**: `frontend/types/copilot.ts` — GraphAction, CopilotResponse, stream event shapes; aligned with backend and `docs/schemas/copilot-contract.json`. 
diff --git a/docs/schemas/copilot-apply-fixtures.json b/docs/schemas/copilot-apply-fixtures.json deleted file mode 100644 index eb5b6580d..000000000 --- a/docs/schemas/copilot-apply-fixtures.json +++ /dev/null @@ -1,207 +0,0 @@ -[ - { - "name": "CREATE_NODE_single", - "initial_nodes": [], - "initial_edges": [], - "actions": [ - { - "type": "CREATE_NODE", - "payload": { - "id": "n1", - "type": "agent", - "label": "My Agent", - "position": { "x": 100, "y": 200 }, - "config": { "systemPrompt": "Hello" } - }, - "reasoning": "" - } - ], - "expected_nodes": [ - { - "id": "n1", - "type": "custom", - "position": { "x": 100, "y": 200 }, - "data": { - "label": "My Agent", - "type": "agent", - "config": { "systemPrompt": "Hello" } - } - } - ], - "expected_edges": [] - }, - { - "name": "CONNECT_NODES", - "initial_nodes": [ - { - "id": "a", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { "label": "A", "type": "agent", "config": {} } - }, - { - "id": "b", - "type": "custom", - "position": { "x": 100, "y": 0 }, - "data": { "label": "B", "type": "agent", "config": {} } - } - ], - "initial_edges": [], - "actions": [ - { - "type": "CONNECT_NODES", - "payload": { "source": "a", "target": "b" }, - "reasoning": "" - } - ], - "expected_nodes": [ - { - "id": "a", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { "label": "A", "type": "agent", "config": {} } - }, - { - "id": "b", - "type": "custom", - "position": { "x": 100, "y": 0 }, - "data": { "label": "B", "type": "agent", "config": {} } - } - ], - "expected_edges": [{ "id": "e-a-b", "source": "a", "target": "b" }] - }, - { - "name": "CONNECT_NODES_idempotent", - "initial_nodes": [ - { - "id": "a", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { "label": "A", "type": "agent", "config": {} } - }, - { - "id": "b", - "type": "custom", - "position": { "x": 100, "y": 0 }, - "data": { "label": "B", "type": "agent", "config": {} } - } - ], - "initial_edges": [{ "id": "e-a-b", "source": "a", "target": "b" }], - "actions": [ - { - "type": "CONNECT_NODES", - "payload": { "source": "a", "target": "b" }, - "reasoning": "" - } - ], - "expected_nodes": [ - { - "id": "a", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { "label": "A", "type": "agent", "config": {} } - }, - { - "id": "b", - "type": "custom", - "position": { "x": 100, "y": 0 }, - "data": { "label": "B", "type": "agent", "config": {} } - } - ], - "expected_edges": [{ "id": "e-a-b", "source": "a", "target": "b" }] - }, - { - "name": "DELETE_NODE", - "initial_nodes": [ - { - "id": "n1", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { "label": "N1", "type": "agent", "config": {} } - }, - { - "id": "n2", - "type": "custom", - "position": { "x": 100, "y": 0 }, - "data": { "label": "N2", "type": "agent", "config": {} } - } - ], - "initial_edges": [{ "id": "e-n1-n2", "source": "n1", "target": "n2" }], - "actions": [ - { "type": "DELETE_NODE", "payload": { "id": "n1" }, "reasoning": "" } - ], - "expected_nodes": [ - { - "id": "n2", - "type": "custom", - "position": { "x": 100, "y": 0 }, - "data": { "label": "N2", "type": "agent", "config": {} } - } - ], - "expected_edges": [] - }, - { - "name": "UPDATE_CONFIG", - "initial_nodes": [ - { - "id": "n1", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { - "label": "N1", - "type": "agent", - "config": { "systemPrompt": "Old" } - } - } - ], - "initial_edges": [], - "actions": [ - { - "type": "UPDATE_CONFIG", - "payload": { "id": "n1", "config": { "systemPrompt": "New" } }, - 
"reasoning": "" - } - ], - "expected_nodes": [ - { - "id": "n1", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { - "label": "N1", - "type": "agent", - "config": { "systemPrompt": "New" } - } - } - ], - "expected_edges": [] - }, - { - "name": "UPDATE_POSITION", - "initial_nodes": [ - { - "id": "n1", - "type": "custom", - "position": { "x": 0, "y": 0 }, - "data": { "label": "N1", "type": "agent", "config": {} } - } - ], - "initial_edges": [], - "actions": [ - { - "type": "UPDATE_POSITION", - "payload": { "id": "n1", "position": { "x": 50, "y": 100 } }, - "reasoning": "" - } - ], - "expected_nodes": [ - { - "id": "n1", - "type": "custom", - "position": { "x": 50, "y": 100 }, - "data": { "label": "N1", "type": "agent", "config": {} } - } - ], - "expected_edges": [] - } -] diff --git a/docs/schemas/copilot-contract.json b/docs/schemas/copilot-contract.json deleted file mode 100644 index d8cc455f9..000000000 --- a/docs/schemas/copilot-contract.json +++ /dev/null @@ -1,267 +0,0 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "CopilotStreamEvent", - "description": "Copilot WebSocket/SSE stream event contract. One of status, content, thought_step, tool_call, tool_result, result, done, error.", - "oneOf": [ - { - "$ref": "#/$defs/CopilotStatusEvent" - }, - { - "$ref": "#/$defs/CopilotContentEvent" - }, - { - "$ref": "#/$defs/CopilotThoughtStepEvent" - }, - { - "$ref": "#/$defs/CopilotToolCallEvent" - }, - { - "$ref": "#/$defs/CopilotToolResultEvent" - }, - { - "$ref": "#/$defs/CopilotResultEvent" - }, - { - "$ref": "#/$defs/CopilotDoneEvent" - }, - { - "$ref": "#/$defs/CopilotErrorEvent" - } - ], - "$defs": { - "CopilotStatusEvent": { - "description": "Stream event: progress status.", - "properties": { - "type": { - "const": "status", - "default": "status", - "title": "Type", - "type": "string" - }, - "stage": { - "description": "Stage identifier (e.g. thinking, processing)", - "title": "Stage", - "type": "string" - }, - "message": { - "description": "Human-readable status message", - "title": "Message", - "type": "string" - } - }, - "required": ["stage", "message"], - "title": "CopilotStatusEvent", - "type": "object" - }, - "CopilotContentEvent": { - "description": "Stream event: streaming AI response content.", - "properties": { - "type": { - "const": "content", - "default": "content", - "title": "Type", - "type": "string" - }, - "content": { - "description": "Content chunk", - "title": "Content", - "type": "string" - } - }, - "required": ["content"], - "title": "CopilotContentEvent", - "type": "object" - }, - "CopilotThoughtStepEvent": { - "description": "Stream event: single thought step in AI reasoning.", - "properties": { - "type": { - "const": "thought_step", - "default": "thought_step", - "title": "Type", - "type": "string" - }, - "step": { - "additionalProperties": true, - "description": "Step with index and content (e.g. 
{index, content})", - "title": "Step", - "type": "object" - } - }, - "required": ["step"], - "title": "CopilotThoughtStepEvent", - "type": "object" - }, - "CopilotToolCallEvent": { - "description": "Stream event: tool invocation started.", - "properties": { - "type": { - "const": "tool_call", - "default": "tool_call", - "title": "Type", - "type": "string" - }, - "tool": { - "description": "Tool name", - "title": "Tool", - "type": "string" - }, - "input": { - "additionalProperties": true, - "description": "Tool input parameters", - "title": "Input", - "type": "object" - } - }, - "required": ["tool"], - "title": "CopilotToolCallEvent", - "type": "object" - }, - "CopilotToolResultEvent": { - "description": "Stream event: tool execution result (action payload).", - "properties": { - "type": { - "const": "tool_result", - "default": "tool_result", - "title": "Type", - "type": "string" - }, - "action": { - "additionalProperties": true, - "description": "Action dict: type, payload, reasoning (GraphAction-compatible)", - "title": "Action", - "type": "object" - } - }, - "required": ["action"], - "title": "CopilotToolResultEvent", - "type": "object" - }, - "CopilotResultEvent": { - "description": "Stream event: final result with message and actions.", - "properties": { - "type": { - "const": "result", - "default": "result", - "title": "Type", - "type": "string" - }, - "message": { - "description": "Final assistant message", - "title": "Message", - "type": "string" - }, - "actions": { - "description": "List of GraphAction-compatible dicts", - "items": { - "additionalProperties": true, - "type": "object" - }, - "title": "Actions", - "type": "array" - }, - "batch": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional batch flag for frontend", - "title": "Batch" - } - }, - "required": ["message"], - "title": "CopilotResultEvent", - "type": "object" - }, - "CopilotDoneEvent": { - "description": "Stream event: stream finished.", - "properties": { - "type": { - "const": "done", - "default": "done", - "title": "Type", - "type": "string" - } - }, - "title": "CopilotDoneEvent", - "type": "object" - }, - "CopilotErrorEvent": { - "description": "Stream event: error occurred.", - "properties": { - "type": { - "const": "error", - "default": "error", - "title": "Type", - "type": "string" - }, - "message": { - "description": "Error message", - "title": "Message", - "type": "string" - }, - "code": { - "description": "Error code for frontend mapping (e.g. 
CREDENTIAL_ERROR, UNKNOWN_ERROR)", - "title": "Code", - "type": "string" - } - }, - "required": ["message", "code"], - "title": "CopilotErrorEvent", - "type": "object" - }, - "GraphAction": { - "$defs": { - "GraphActionType": { - "description": "Graph action types that can be executed by Copilot.", - "enum": [ - "CREATE_NODE", - "CONNECT_NODES", - "DELETE_NODE", - "UPDATE_CONFIG", - "UPDATE_POSITION" - ], - "title": "GraphActionType", - "type": "string" - } - }, - "description": "Single graph action to be executed.", - "properties": { - "type": { - "$ref": "#/$defs/GraphActionType", - "description": "Action type" - }, - "payload": { - "additionalProperties": true, - "description": "Action payload", - "title": "Payload", - "type": "object" - }, - "reasoning": { - "description": "Reasoning for the action", - "title": "Reasoning", - "type": "string" - } - }, - "required": ["type", "payload", "reasoning"], - "title": "GraphAction", - "type": "object" - }, - "GraphActionType": { - "type": "string", - "enum": [ - "CREATE_NODE", - "CONNECT_NODES", - "DELETE_NODE", - "UPDATE_CONFIG", - "UPDATE_POSITION" - ], - "description": "Graph action type" - } - } -} diff --git a/docs/superpowers/plans/2026-03-18-skill-creator-agent.md b/docs/superpowers/plans/2026-03-18-skill-creator-agent.md deleted file mode 100644 index c9064dbae..000000000 --- a/docs/superpowers/plans/2026-03-18-skill-creator-agent.md +++ /dev/null @@ -1,1285 +0,0 @@ -# Skill Creator Agent — Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Allow users to create/modify Skills through AI Agent conversation in a Docker sandbox, preview results, and save to DB after confirmation. - -**Architecture:** Reuse existing `/v1/chat/stream` SSE architecture with a new `mode="skill_creator"` parameter. A dedicated `create_skill_creator_graph()` method in `GraphService` creates a single-node DeepAgents graph with skill-creator system prompt. The `preview_skill` builtin tool reads generated files from the sandbox for frontend rendering. Frontend adds a Skill Creator page with chat + preview panel layout, and a Dashboard entry card. 
- -**Tech Stack:** FastAPI, LangGraph, DeepAgents, Docker Sandbox (pydantic-ai-backend), Next.js 16 (App Router), React 19, TypeScript, Zustand, TanStack Query - -**Spec:** `docs/superpowers/specs/2026-03-18-skill-creator-agent-design.md` - ---- - -## File Structure - -### Backend — New Files - -| File | Responsibility | -|------|---------------| -| `backend/app/core/tools/buildin/preview_skill.py` | `preview_skill` tool: reads skill files from sandbox host path, validates, returns structured JSON | - -### Backend — Modified Files - -| File | Change | -|------|--------| -| `backend/app/schemas/chat.py` | Add `mode` and `edit_skill_id` fields to `ChatRequest` | -| `backend/app/api/v1/chat.py` | Handle `mode="skill_creator"` branch in `chat_stream` and `chat` | -| `backend/app/services/graph_service.py` | Add `create_skill_creator_graph()` method | -| `backend/app/core/tools/tool_registry.py` | Register `preview_skill` in `_initialize_builtin_tools()` | - -### Frontend — New Files - -| File | Responsibility | -|------|---------------| -| `frontend/app/skills/creator/page.tsx` | Skill Creator page (route: `/skills/creator`) | -| `frontend/app/skills/creator/components/SkillCreatorChat.tsx` | Chat panel wrapping `ChatInterface` with `mode=skill_creator` | -| `frontend/app/skills/creator/components/SkillPreviewPanel.tsx` | Right panel: file tree + content viewer + save button | -| `frontend/app/skills/creator/components/SkillFileTree.tsx` | File directory tree | -| `frontend/app/skills/creator/components/SkillFileViewer.tsx` | Single file content with syntax highlighting | -| `frontend/app/skills/creator/components/SkillSaveDialog.tsx` | Confirmation dialog before saving to DB | - -### Frontend — Modified Files - -| File | Change | -|------|--------| -| `frontend/app/chat/hooks/useBackendChatStream.ts` | Pass `mode` through to backend in `streamChat` call | -| `frontend/app/skills/page.tsx` | Add "AI Create" button linking to `/skills/creator` | -| `frontend/app/chat/components/ChatHome.tsx` | Add Skill Creator entry card on chat home | - ---- - -## Task 1: Backend — Extend ChatRequest Schema - -**Files:** -- Modify: `backend/app/schemas/chat.py:8-15` -- Test: `backend/tests/test_schemas/test_chat.py` (create if needed) - -- [ ] **Step 1: Write the failing test** - -```python -# backend/tests/test_schemas/test_chat.py -from app.schemas.chat import ChatRequest - - -def test_chat_request_with_mode(): - req = ChatRequest( - message="Create a skill", - mode="skill_creator", - edit_skill_id="some-uuid-string", - ) - assert req.mode == "skill_creator" - assert req.edit_skill_id == "some-uuid-string" - - -def test_chat_request_mode_defaults_to_none(): - req = ChatRequest(message="Hello") - assert req.mode is None - assert req.edit_skill_id is None - - -def test_chat_request_mode_validation(): - """mode must be None or 'skill_creator'""" - import pytest - from pydantic import ValidationError - - with pytest.raises(ValidationError): - ChatRequest(message="Hello", mode="invalid_mode") -``` - -- [ ] **Step 2: Run test to verify it fails** - -Run: `cd backend && python -m pytest tests/test_schemas/test_chat.py -v` -Expected: FAIL — `ChatRequest` doesn't accept `mode` or `edit_skill_id` - -- [ ] **Step 3: Implement schema changes** - -Edit `backend/app/schemas/chat.py`: - -```python -from typing import Any, Literal - -class ChatRequest(PydanticBaseModel): - message: str = Field(..., description="用户消息") - thread_id: str | None = Field(None, description="会话线程ID,不提供则创建新会话") - graph_id: uuid.UUID | None = 
Field(None, description="图ID,使用指定的图进行对话") - metadata: dict[str, Any] = Field(default_factory=dict, description="元数据") - mode: Literal["skill_creator"] | None = Field(None, description="特殊模式: skill_creator 使用 Skill 创建专用 Graph") - edit_skill_id: str | None = Field(None, description="修改已有 Skill 时传入 Skill ID") -``` - -- [ ] **Step 4: Run test to verify it passes** - -Run: `cd backend && python -m pytest tests/test_schemas/test_chat.py -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add backend/app/schemas/chat.py backend/tests/test_schemas/test_chat.py -git commit -m "feat(schema): add mode and edit_skill_id to ChatRequest" -``` - ---- - -## Task 2: Backend — preview_skill Builtin Tool - -**Files:** -- Create: `backend/app/core/tools/buildin/preview_skill.py` -- Test: `backend/tests/test_tools/test_preview_skill.py` (create) - -- [ ] **Step 1: Write the failing test** - -```python -# backend/tests/test_tools/test_preview_skill.py -import json -import os -import tempfile -from pathlib import Path - -import pytest - -from app.core.tools.buildin.preview_skill import preview_skill_in_sandbox - - -@pytest.fixture -def skill_dir(): - """Create a temporary skill directory with SKILL.md and a script.""" - with tempfile.TemporaryDirectory() as tmpdir: - skill_path = Path(tmpdir) / "skills" / "test-skill" - skill_path.mkdir(parents=True) - - # Write SKILL.md - skill_md = skill_path / "SKILL.md" - skill_md.write_text( - "---\nname: test-skill\ndescription: A test skill\n---\n# Test Skill\nInstructions here." - ) - - # Write a script - scripts_dir = skill_path / "scripts" - scripts_dir.mkdir() - (scripts_dir / "run.py").write_text("print('hello')") - - yield tmpdir - - -def test_preview_skill_returns_structured_output(skill_dir): - result = preview_skill_in_sandbox( - skill_name="test-skill", - sandbox_root=skill_dir, - ) - data = json.loads(result) - assert data["skill_name"] == "test-skill" - assert len(data["files"]) == 2 - assert data["validation"]["valid"] is True - - file_paths = [f["path"] for f in data["files"]] - assert "SKILL.md" in file_paths - assert "scripts/run.py" in file_paths - - -def test_preview_skill_missing_skill_md(skill_dir): - """A skill without SKILL.md should fail validation.""" - skill_path = Path(skill_dir) / "skills" / "bad-skill" - skill_path.mkdir(parents=True) - (skill_path / "readme.txt").write_text("no skill md") - - result = preview_skill_in_sandbox( - skill_name="bad-skill", - sandbox_root=skill_dir, - ) - data = json.loads(result) - assert data["validation"]["valid"] is False - assert any("SKILL.md" in e for e in data["validation"]["errors"]) - - -def test_preview_skill_nonexistent_dir(skill_dir): - result = preview_skill_in_sandbox( - skill_name="nonexistent", - sandbox_root=skill_dir, - ) - data = json.loads(result) - assert data["validation"]["valid"] is False - assert any("not found" in e.lower() for e in data["validation"]["errors"]) -``` - -- [ ] **Step 2: Run test to verify it fails** - -Run: `cd backend && python -m pytest tests/test_tools/test_preview_skill.py -v` -Expected: FAIL — `ImportError: cannot import name 'preview_skill_in_sandbox'` - -- [ ] **Step 3: Implement preview_skill tool** - -Create `backend/app/core/tools/buildin/preview_skill.py`: - -```python -"""preview_skill — reads generated skill files from sandbox and returns structured JSON.""" -import json -from pathlib import Path -from typing import Optional - -from app.core.skill.validators import ( - validate_skill_description, - validate_skill_name, -) -from 
app.core.skill.yaml_parser import parse_skill_md - - -def _detect_file_type(path: str) -> str: - ext_map = { - ".py": "python", - ".md": "markdown", - ".json": "json", - ".yaml": "yaml", - ".yml": "yaml", - ".txt": "text", - ".sh": "shell", - ".js": "javascript", - ".ts": "typescript", - } - suffix = Path(path).suffix.lower() - return ext_map.get(suffix, "text") - - -def preview_skill_in_sandbox( - skill_name: str, - sandbox_root: str, - skills_subdir: str = "skills", -) -> str: - """Read all files from a skill directory in the sandbox and return structured JSON. - - Args: - skill_name: Directory name under /workspace/skills/ in the sandbox. - sandbox_root: Host-side sandbox root (e.g., /tmp/sandboxes/{user_id}). - skills_subdir: Subdirectory within sandbox_root where skills live. - - Returns: - JSON string with skill_name, files[], and validation{}. - """ - skill_dir = Path(sandbox_root) / skills_subdir / skill_name - errors: list[str] = [] - warnings: list[str] = [] - files: list[dict] = [] - - if not skill_dir.exists() or not skill_dir.is_dir(): - return json.dumps( - { - "skill_name": skill_name, - "files": [], - "validation": { - "valid": False, - "errors": [f"Skill directory not found: {skill_name}"], - "warnings": [], - }, - }, - ensure_ascii=False, - ) - - # Collect all files recursively - for file_path in sorted(skill_dir.rglob("*")): - if not file_path.is_file(): - continue - rel_path = str(file_path.relative_to(skill_dir)) - try: - content = file_path.read_text(encoding="utf-8") - except (UnicodeDecodeError, OSError): - content = f"[Binary or unreadable file: {rel_path}]" - files.append( - { - "path": rel_path, - "content": content, - "file_type": _detect_file_type(rel_path), - "size": file_path.stat().st_size, - } - ) - - # Validate: SKILL.md must exist - skill_md_files = [f for f in files if f["path"] == "SKILL.md"] - if not skill_md_files: - errors.append("Missing required file: SKILL.md") - else: - # Parse and validate frontmatter - skill_md_content = skill_md_files[0]["content"] - try: - frontmatter, body = parse_skill_md(skill_md_content) - name = frontmatter.get("name", "") - description = frontmatter.get("description", "") - - name_valid, name_msg = validate_skill_name(name) - if not name_valid: - errors.append(f"Name validation: {name_msg}") - - desc_valid, desc_msg = validate_skill_description(description) - if not desc_valid: - errors.append(f"Description validation: {desc_msg}") - - if not body.strip(): - warnings.append("SKILL.md body is empty") - - except Exception as e: - errors.append(f"Failed to parse SKILL.md: {e}") - - return json.dumps( - { - "skill_name": skill_name, - "files": files, - "validation": { - "valid": len(errors) == 0, - "errors": errors, - "warnings": warnings, - }, - }, - ensure_ascii=False, - ) -``` - -- [ ] **Step 4: Run test to verify it passes** - -Run: `cd backend && python -m pytest tests/test_tools/test_preview_skill.py -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add backend/app/core/tools/buildin/preview_skill.py backend/tests/test_tools/test_preview_skill.py -git commit -m "feat(tools): add preview_skill builtin tool" -``` - ---- - -## Task 3: Backend — Register preview_skill in Tool Registry - -**Files:** -- Modify: `backend/app/core/tools/tool_registry.py:611-652` - -- [ ] **Step 1: Add import and registration** - -Edit `backend/app/core/tools/tool_registry.py`, inside `_initialize_builtin_tools()`, after the existing `skill_tools.deploy_local_skill` registration block: - -```python -from 
app.core.tools.buildin.preview_skill import preview_skill_in_sandbox - -registry.register_builtin( - callable_func=preview_skill_in_sandbox, - name="preview_skill", - description="Preview a skill generated in the sandbox. Reads all files from the skill directory and returns structured JSON with file contents and validation results.", - category="skill", - tags={"skill", "preview", "sandbox"}, -) -``` - -- [ ] **Step 2: Verify import works** - -Run: `cd backend && python -c "from app.core.tools.buildin.preview_skill import preview_skill_in_sandbox; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/core/tools/tool_registry.py -git commit -m "feat(tools): register preview_skill in builtin tool registry" -``` - ---- - -## Task 4: Backend — GraphService.create_skill_creator_graph() - -**Files:** -- Modify: `backend/app/services/graph_service.py:667-744` (add method after `create_default_deep_agents_graph`) - -- [ ] **Step 1: Read the existing `create_default_deep_agents_graph` method** - -Read `backend/app/services/graph_service.py` lines 667-744 to understand the exact pattern. - -- [ ] **Step 2: Implement `create_skill_creator_graph()`** - -Add new method to `GraphService` class, following the same pattern as `create_default_deep_agents_graph`: - -```python -async def create_skill_creator_graph( - self, - llm_model: Optional[str] = None, - api_key: Optional[str] = None, - base_url: Optional[str] = None, - max_tokens: int = 4096, - user_id: Optional[Any] = None, - edit_skill_id: Optional[str] = None, -) -> CompiledStateGraph: - """Create a specialized graph for the Skill Creator Agent. - - Uses the same single-node DeepAgents pattern as the default graph, - but with a skill-creator-specific system prompt and tool configuration. - """ - from app.core.skill.sandbox_loader import SkillSandboxLoader - - # Build skill creator system prompt - skill_creator_system_prompt = ( - "You are a Skill Creator Agent. Your job is to help users create and modify " - "Skills (reusable capability packages) through conversation.\n\n" - "## Workflow\n" - "1. Understand the user's requirements through conversation\n" - "2. Use init_skill.py to initialize the skill directory structure\n" - "3. Write SKILL.md with proper YAML frontmatter (name, description) + markdown body\n" - "4. Create supporting files in scripts/, references/, assets/ as needed\n" - "5. Run quick_validate.py to validate the skill\n" - "6. Call the preview_skill tool to output the final result for user review\n\n" - "## Rules\n" - "- Skill names must match: ^[a-z0-9]+(-[a-z0-9]+)*$ (max 64 chars)\n" - "- SKILL.md is required with YAML frontmatter containing 'name' and 'description'\n" - "- Always validate before previewing\n" - "- The skill-creator skill is pre-loaded at /workspace/skills/skill-creator/ — " - "use its scripts (init_skill.py, quick_validate.py) and references for guidance\n" - ) - - if edit_skill_id: - skill_creator_system_prompt += ( - f"\n## Editing Mode\n" - f"The user wants to modify an existing skill (ID: {edit_skill_id}). " - f"The skill files have been pre-loaded into the sandbox. 
" - f"Read the existing files first, then apply the user's requested changes.\n" - ) - - graph_id = uuid.uuid4() - graph = AgentGraph( - id=graph_id, - name="Skill Creator", - description="AI-powered skill creation and modification", - ) - - node = GraphNode( - id=uuid.uuid4(), - graph_id=graph_id, - type="agent", - data={ - "label": "Skill Creator Agent", - "config": { - "useDeepAgents": True, - "skills": ["*"], - "mode": "skill_creator", - "system_prompt": skill_creator_system_prompt, - }, - }, - position={"x": 0, "y": 0}, - ) - - builder = GraphBuilder( - graph=graph, - nodes=[node], - edges=[], - llm_model=llm_model, - api_key=api_key, - base_url=base_url, - max_tokens=max_tokens, - user_id=user_id, - ) - return await builder.build() -``` - -- [ ] **Step 3: Verify no import errors** - -Run: `cd backend && python -c "from app.services.graph_service import GraphService; print('OK')"` -Expected: `OK` - -- [ ] **Step 4: Commit** - -```bash -git add backend/app/services/graph_service.py -git commit -m "feat(graph): add create_skill_creator_graph() to GraphService" -``` - ---- - -## Task 5: Backend — Wire mode="skill_creator" into Chat Stream - -**Files:** -- Modify: `backend/app/api/v1/chat.py:694-713` (inside `event_generator()` in `chat_stream`) - -- [ ] **Step 1: Read the current branching logic** - -Read `backend/app/api/v1/chat.py` lines 690-720 to see the exact graph creation branch. - -- [ ] **Step 2: Add skill_creator mode branch** - -Insert the new branch between `graph_service = GraphService(db)` (line 694) and `if payload.graph_id is None` (line 695): - -```python -graph_service = GraphService(db) - -# Skill Creator mode — dedicated graph -if payload.mode == "skill_creator": - graph = await graph_service.create_skill_creator_graph( - llm_model=llm_model, - api_key=api_key, - base_url=base_url, - max_tokens=max_tokens, - user_id=str(current_user.id), - edit_skill_id=payload.edit_skill_id, - ) -elif payload.graph_id is None: - # ... existing default graph creation -``` - -Also add the same branch in the non-streaming `chat` endpoint (around lines 554-573) following the identical pattern. - -- [ ] **Step 3: Handle edit_skill_id preloading** - -After the graph is created in skill_creator mode, add skill preloading logic. If `edit_skill_id` is provided, load the target skill into the sandbox before the agent starts: - -```python -if payload.mode == "skill_creator" and payload.edit_skill_id: - try: - from app.core.skill.sandbox_loader import SkillSandboxLoader - from app.services.skill_service import SkillService - - skill_service = SkillService(db) - loader = SkillSandboxLoader(db, user_id=str(current_user.id)) - # The sandbox backend will be available from the graph's agent node - # Preloading happens at graph build time via DeepAgentsGraphBuilder - except Exception as e: - logger.warning(f"Failed to preload skill for editing: {e}") -``` - -Note: The actual preloading mechanism depends on how `DeepAgentsGraphBuilder` initializes the sandbox. The `edit_skill_id` is passed into the graph config so the builder can handle it during `build()`. Verify by reading `deep_agents_builder.py` before implementing this step. 
-
-- [ ] **Step 4: Test manually**
-
-Run: `cd backend && python -c "from app.api.v1.chat import router; print('Router loaded OK')"`
-Expected: `Router loaded OK` (no import errors)
-
-- [ ] **Step 5: Commit**
-
-```bash
-git add backend/app/api/v1/chat.py
-git commit -m "feat(chat): wire mode=skill_creator into chat_stream endpoint"
-```
-
----
-
-## Task 6: Frontend — Skill Creator Page Layout
-
-**Files:**
-- Create: `frontend/app/skills/creator/page.tsx`
-
-- [ ] **Step 1: Create the page**
-
-```tsx
-// frontend/app/skills/creator/page.tsx
-"use client";
-
-import { useSearchParams } from "next/navigation";
-import { useState, useCallback } from "react";
-import { SkillCreatorChat } from "./components/SkillCreatorChat";
-import { SkillPreviewPanel } from "./components/SkillPreviewPanel";
-
-export interface SkillPreviewData {
-  skill_name: string;
-  files: Array<{
-    path: string;
-    content: string;
-    file_type: string;
-    size: number;
-  }>;
-  validation: {
-    valid: boolean;
-    errors: string[];
-    warnings: string[];
-  };
-}
-
-export default function SkillCreatorPage() {
-  const searchParams = useSearchParams();
-  const editSkillId = searchParams.get("edit");
-  const [previewData, setPreviewData] = useState<SkillPreviewData | null>(null);
-
-  const handlePreviewUpdate = useCallback((data: SkillPreviewData) => {
-    setPreviewData(data);
-  }, []);
-
-  return (
-    {/* container markup reconstructed; classNames are placeholders */}
-    <div className="flex h-screen">
-      {/* Left: Chat panel */}
-      <div className="flex-1 border-r">
-        <SkillCreatorChat
-          editSkillId={editSkillId}
-          onPreviewUpdate={handlePreviewUpdate}
-        />
-      </div>
-
-      {/* Right: Preview panel */}
-      <div className="flex-1">
-        <SkillPreviewPanel data={previewData} editSkillId={editSkillId} />
-      </div>
-    </div>
-  );
-}
-```
-
-- [ ] **Step 2: Verify route exists**
-
-Run: `ls frontend/app/skills/creator/page.tsx`
-Expected: File exists
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add frontend/app/skills/creator/page.tsx
-git commit -m "feat(frontend): add Skill Creator page layout"
-```
-
----
-
-## Task 7: Frontend — SkillCreatorChat Component
-
-**Files:**
-- Create: `frontend/app/skills/creator/components/SkillCreatorChat.tsx`
-
-- [ ] **Step 1: Create the component**
-
-This component wraps the existing chat stream hook with `mode="skill_creator"`. It intercepts `tool_end` events for `preview_skill` to extract preview data.
-
-```tsx
-// frontend/app/skills/creator/components/SkillCreatorChat.tsx
-"use client";
-
-import { useState, useCallback, useRef } from "react";
-import { useBackendChatStream } from "@/app/chat/hooks/useBackendChatStream";
-import type { SkillPreviewData } from "../page";
-
-interface SkillCreatorChatProps {
-  editSkillId: string | null;
-  onPreviewUpdate: (data: SkillPreviewData) => void;
-}
-
-interface ChatMessage {
-  id: string;
-  role: "user" | "assistant";
-  content: string;
-}
-
-export function SkillCreatorChat({
-  editSkillId,
-  onPreviewUpdate,
-}: SkillCreatorChatProps) {
-  const [messages, setMessages] = useState<ChatMessage[]>([]);
-  const [input, setInput] = useState("");
-  const [isStreaming, setIsStreaming] = useState(false);
-  const threadIdRef = useRef<string | null>(null);
-  const messagesEndRef = useRef<HTMLDivElement | null>(null);
-
-  const { sendMessage } = useBackendChatStream(setMessages as any, {
-    onEvent: (event: any) => {
-      // Intercept preview_skill tool_end events
-      if (
-        event.type === "tool_end" &&
-        event.data?.tool_name === "preview_skill"
-      ) {
-        try {
-          const previewData = JSON.parse(event.data.output);
-          onPreviewUpdate(previewData);
-        } catch {
-          // ignore parse errors
-        }
-      }
-      if (event.type === "thread_id" && event.data?.thread_id) {
-        threadIdRef.current = event.data.thread_id;
-      }
-    },
-  });
-
-  const handleSend = useCallback(async () => {
-    if (!input.trim() || isStreaming) return;
-
-    const userMessage = input.trim();
-    setInput("");
-    setMessages((prev) => [
-      ...prev,
-      { id: crypto.randomUUID(), role: "user", content: userMessage },
-    ]);
-    setIsStreaming(true);
-
-    try {
-      await sendMessage(userMessage, {
-        threadId: threadIdRef.current,
-        graphId: null,
-        metadata: {
-          mode: "skill_creator",
-          edit_skill_id: editSkillId,
-        },
-      });
-    } finally {
-      setIsStreaming(false);
-    }
-  }, [input, isStreaming, sendMessage, editSkillId]);
-
-  return (
-    {/* markup reconstructed; classNames are placeholders */}
-    <div className="flex h-full flex-col">
-      {/* Header */}
-      <div className="border-b px-4 py-3 font-medium">
-        {editSkillId ? "AI Skill Editor" : "AI Skill Creator"}
-      </div>
-
-      {/* Messages */}
-      <div className="flex-1 overflow-y-auto p-4">
-        {messages.map((msg) => (
-          <div key={msg.id} className={msg.role === "user" ? "text-right" : "text-left"}>
-            <div className="inline-block rounded px-3 py-2">
-              {msg.content}
-            </div>
-          </div>
-        ))}
-        <div ref={messagesEndRef} />
-      </div>
-
-      {/* Input */}
-      <div className="border-t p-3">
-        <div className="flex gap-2">
-          <input
-            value={input}
-            onChange={(e) => setInput(e.target.value)}
-            onKeyDown={(e) => e.key === "Enter" && !e.shiftKey && handleSend()}
-            placeholder={
-              editSkillId
-                ? "Describe what you want to change..."
-                : "Describe the skill you want to create..."
-            }
-            disabled={isStreaming}
-          />
-          <button onClick={handleSend} disabled={isStreaming || !input.trim()}>
-            Send
-          </button>
-        </div>
-      </div>
-    </div>
- ); -} -``` - -- [ ] **Step 2: Commit** - -```bash -git add frontend/app/skills/creator/components/SkillCreatorChat.tsx -git commit -m "feat(frontend): add SkillCreatorChat component" -``` - ---- - -## Task 8: Frontend — SkillPreviewPanel + File Viewer - -**Files:** -- Create: `frontend/app/skills/creator/components/SkillPreviewPanel.tsx` -- Create: `frontend/app/skills/creator/components/SkillFileTree.tsx` -- Create: `frontend/app/skills/creator/components/SkillFileViewer.tsx` -- Create: `frontend/app/skills/creator/components/SkillSaveDialog.tsx` - -- [ ] **Step 1: Create SkillFileTree** - -```tsx -// frontend/app/skills/creator/components/SkillFileTree.tsx -"use client"; - -interface FileEntry { - path: string; - file_type: string; - size: number; -} - -interface SkillFileTreeProps { - files: FileEntry[]; - selectedPath: string | null; - onSelectFile: (path: string) => void; -} - -const FILE_ICONS: Record = { - markdown: "📄", - python: "🐍", - json: "{ }", - yaml: "⚙️", - shell: "🖥️", - text: "📝", -}; - -export function SkillFileTree({ - files, - selectedPath, - onSelectFile, -}: SkillFileTreeProps) { - return ( -
-    <div className="p-2">
-      {/* item markup reconstructed; classNames are placeholders */}
-      {files.map((file) => (
-        <button
-          key={file.path}
-          onClick={() => onSelectFile(file.path)}
-          className={file.path === selectedPath ? "font-medium" : ""}
-        >
-          <span>{FILE_ICONS[file.file_type] ?? "📝"}</span>{" "}
-          <span>{file.path}</span>
-        </button>
-      ))}
-    </div>
- ); -} -``` - -- [ ] **Step 2: Create SkillFileViewer** - -```tsx -// frontend/app/skills/creator/components/SkillFileViewer.tsx -"use client"; - -interface SkillFileViewerProps { - path: string; - content: string; - fileType: string; -} - -export function SkillFileViewer({ path, content, fileType }: SkillFileViewerProps) { - return ( -
-    <div className="flex h-full flex-col">
-      {/* markup reconstructed; classNames are placeholders */}
-      <div className="border-b px-3 py-2 font-mono text-xs">
-        {path}
-      </div>
-      <pre className="flex-1 overflow-auto p-3 text-sm" data-file-type={fileType}>
-        {content}
-      </pre>
-    </div>
- ); -} -``` - -- [ ] **Step 3: Create SkillSaveDialog** - -```tsx -// frontend/app/skills/creator/components/SkillSaveDialog.tsx -"use client"; - -import { useState } from "react"; -import type { SkillPreviewData } from "../page"; - -interface SkillSaveDialogProps { - data: SkillPreviewData; - editSkillId: string | null; - onConfirm: () => void; - onCancel: () => void; - isSaving: boolean; -} - -export function SkillSaveDialog({ - data, - editSkillId, - onConfirm, - onCancel, - isSaving, -}: SkillSaveDialogProps) { - return ( -
-    {/* dialog markup reconstructed; classNames are placeholders */}
-    <div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50">
-      <div className="w-96 rounded-lg bg-background p-6 shadow-lg">
-        <div className="mb-4 text-lg font-medium">
-          {editSkillId ? "Update Skill" : "Save Skill"}
-        </div>
-
-        <div className="space-y-2 text-sm">
-          <div>
-            <span>Name: </span>
-            <span>{data.skill_name}</span>
-          </div>
-          <div>
-            <span>Files: </span>
-            <span>{data.files.length}</span>
-          </div>
-          {data.validation.warnings.length > 0 && (
-            <div>
-              {data.validation.warnings.map((w, i) => (
-                <div key={i}>⚠ {w}</div>
-              ))}
-            </div>
-          )}
-        </div>
-
-        <div className="mt-6 flex justify-end gap-2">
-          <button onClick={onCancel} disabled={isSaving}>
-            Cancel
-          </button>
-          <button onClick={onConfirm} disabled={isSaving}>
-            {isSaving ? "Saving..." : editSkillId ? "Update" : "Save"}
-          </button>
-        </div>
-      </div>
-    </div>
- ); -} -``` - -- [ ] **Step 4: Create SkillPreviewPanel** - -```tsx -// frontend/app/skills/creator/components/SkillPreviewPanel.tsx -"use client"; - -import { useState, useCallback } from "react"; -import { SkillFileTree } from "./SkillFileTree"; -import { SkillFileViewer } from "./SkillFileViewer"; -import { SkillSaveDialog } from "./SkillSaveDialog"; -import type { SkillPreviewData } from "../page"; - -interface SkillPreviewPanelProps { - data: SkillPreviewData | null; - editSkillId: string | null; -} - -export function SkillPreviewPanel({ data, editSkillId }: SkillPreviewPanelProps) { - const [selectedPath, setSelectedPath] = useState(null); - const [showSaveDialog, setShowSaveDialog] = useState(false); - const [isSaving, setIsSaving] = useState(false); - - const selectedFile = data?.files.find((f) => f.path === selectedPath) ?? null; - - const handleSave = useCallback(async () => { - if (!data) return; - setIsSaving(true); - try { - const skillFiles = data.files.map((f) => ({ - path: f.path, - file_name: f.path.split("/").pop() ?? f.path, - file_type: f.file_type, - content: f.content, - size: f.size, - })); - - // Parse name/description from SKILL.md frontmatter - const skillMd = data.files.find((f) => f.path === "SKILL.md"); - const frontmatterMatch = skillMd?.content.match( - /^---\n([\s\S]*?)\n---/ - ); - let name = data.skill_name; - let description = ""; - let content = skillMd?.content ?? ""; - - if (frontmatterMatch) { - const lines = frontmatterMatch[1].split("\n"); - for (const line of lines) { - const [key, ...vals] = line.split(":"); - if (key.trim() === "name") name = vals.join(":").trim(); - if (key.trim() === "description") description = vals.join(":").trim(); - } - // content = body after frontmatter - content = (skillMd?.content ?? "").replace(/^---\n[\s\S]*?\n---\n?/, ""); - } - - const payload = { - name, - description, - content, - tags: [], - is_public: false, - files: skillFiles, - }; - - const baseUrl = process.env.NEXT_PUBLIC_BACKEND_URL ?? ""; - const url = editSkillId - ? `${baseUrl}/api/v1/skills/${editSkillId}` - : `${baseUrl}/api/v1/skills`; - - const resp = await fetch(url, { - method: editSkillId ? "PUT" : "POST", - headers: { "Content-Type": "application/json" }, - credentials: "include", - body: JSON.stringify(payload), - }); - - if (!resp.ok) { - const err = await resp.json().catch(() => ({})); - throw new Error(err.detail ?? `HTTP ${resp.status}`); - } - - setShowSaveDialog(false); - // TODO: show success toast / redirect to skills list - } catch (error) { - console.error("Failed to save skill:", error); - // TODO: show error toast - } finally { - setIsSaving(false); - } - }, [data, editSkillId]); - - if (!data) { - return ( -
-      <div className="flex h-full items-center justify-center text-sm">
-        Skill preview will appear here once the Agent generates it.
-      </div>
-    );
-  }
-
-  return (
-    {/* panel markup reconstructed; classNames are placeholders */}
-    <div className="flex h-full flex-col">
-      {/* Header */}
-      <div className="flex items-center justify-between border-b px-4 py-2">
-        <div>
-          <span className="font-mono">{data.skill_name}/</span>
-          {!data.validation.valid && (
-            <span className="ml-2 text-xs text-red-500">
-              {data.validation.errors.join("; ")}
-            </span>
-          )}
-        </div>
-        <button onClick={() => setShowSaveDialog(true)} disabled={!data.validation.valid}>
-          {editSkillId ? "Update Skill" : "Save Skill"}
-        </button>
-      </div>
-
-      {/* File tree */}
-      <SkillFileTree
-        files={data.files}
-        selectedPath={selectedPath}
-        onSelectFile={setSelectedPath}
-      />
-
-      {/* File content viewer */}
-      <div className="flex-1 overflow-hidden">
-        {selectedFile ? (
-          <SkillFileViewer
-            path={selectedFile.path}
-            content={selectedFile.content}
-            fileType={selectedFile.file_type}
-          />
-        ) : (
-          <div className="flex h-full items-center justify-center text-sm">
-            Select a file to preview
-          </div>
-        )}
-      </div>
-
-      {/* Save dialog */}
-      {showSaveDialog && (
-        <SkillSaveDialog
-          data={data}
-          editSkillId={editSkillId}
-          onConfirm={handleSave}
-          onCancel={() => setShowSaveDialog(false)}
-          isSaving={isSaving}
-        />
-      )}
-    </div>
- ); -} -``` - -- [ ] **Step 5: Commit** - -```bash -git add frontend/app/skills/creator/components/ -git commit -m "feat(frontend): add Skill preview panel, file tree, viewer, save dialog" -``` - ---- - -## Task 9: Frontend — Dashboard Entry Card + Skills Page Button - -**Files:** -- Modify: `frontend/app/chat/components/ChatHome.tsx` (add Skill Creator card) -- Modify: `frontend/app/skills/page.tsx` (add "AI Create" button) - -- [ ] **Step 1: Read existing ChatHome.tsx** - -Read `frontend/app/chat/components/ChatHome.tsx` to understand the current layout and how mode cards are rendered (Rapid Mode, Deep Mode, etc.). - -- [ ] **Step 2: Add Skill Creator card to ChatHome** - -Add a new card alongside existing mode cards. Follow the existing card pattern. Example addition: - -```tsx - -
-{/* card markup reconstructed — mirror the existing mode-card structure */}
-<button onClick={() => router.push("/skills/creator")}>
-  <div>Skill Creator</div>
-  <div>AI-powered skill creation and modification</div>
-</button>
-```
-
-Exact classes and structure must match the existing cards — read the file first.
-
-- [ ] **Step 3: Add "AI Create" button to Skills page**
-
-Edit `frontend/app/skills/page.tsx`. Add a button/link near the tab switcher:
-
-```tsx
-import Link from "next/link";
-
-// In the header/action area of the skills page:
-{/* link markup reconstructed; style it like the page's existing actions */}
-<Link href="/skills/creator">
-  <button>AI Create</button>
-</Link>
-```
-
-- [ ] **Step 4: Verify no build errors**
-
-Run: `cd frontend && npx next build --no-lint 2>&1 | tail -20`
-Expected: No errors related to the new pages
-
-- [ ] **Step 5: Commit**
-
-```bash
-git add frontend/app/chat/components/ChatHome.tsx frontend/app/skills/page.tsx
-git commit -m "feat(frontend): add Skill Creator entry on chat home and skills page"
-```
-
----
-
-## Task 10: Frontend — Pass mode through Chat Stream to Metadata
-
-**Files:**
-- Modify: `frontend/app/chat/hooks/useBackendChatStream.ts`
-- Modify: `frontend/services/chatBackend.ts` (or wherever `streamChat` is defined)
-
-- [ ] **Step 1: Read the streaming service**
-
-Read `frontend/app/chat/hooks/useBackendChatStream.ts` and the `streamChat` function to understand how metadata is passed to the backend.
-
-- [ ] **Step 2: Ensure mode passes through**
-
-The `metadata` field in `ChatRequest` already supports arbitrary keys. The `SkillCreatorChat` component passes `mode: "skill_creator"` via metadata. Verify that the `streamChat` function forwards metadata to the `POST /v1/chat/stream` body. If `mode` needs to be a top-level field (not inside metadata), update the `streamChat` function to include it.
-
-If `mode` is a top-level ChatRequest field (as designed in Task 1), the frontend `streamChat` call needs to pass it at the top level:
-
-```typescript
-// In streamChat or equivalent:
-const body = {
-  message: userPrompt,
-  thread_id: opts.threadId,
-  graph_id: opts.graphId,
-  mode: opts.metadata?.mode ?? null,
-  edit_skill_id: opts.metadata?.edit_skill_id ?? null,
-  metadata: opts.metadata ??
{}, -}; -``` - -- [ ] **Step 3: Commit** - -```bash -git add frontend/app/chat/hooks/useBackendChatStream.ts frontend/services/chatBackend.ts -git commit -m "feat(frontend): pass mode and edit_skill_id through to chat API" -``` - ---- - -## Task 11: Integration Test — End-to-End Skill Creator Flow - -**Files:** -- Create: `backend/tests/test_api/test_skill_creator.py` - -- [ ] **Step 1: Write integration test** - -```python -# backend/tests/test_api/test_skill_creator.py -"""Integration test for skill_creator mode in chat stream.""" -import pytest -from httpx import AsyncClient - - -@pytest.mark.asyncio -async def test_chat_stream_skill_creator_mode_accepted( - client: AsyncClient, auth_headers: dict -): - """Verify that mode=skill_creator is accepted by the chat stream endpoint.""" - response = await client.post( - "/api/v1/chat/stream", - json={ - "message": "Create a simple hello-world skill", - "mode": "skill_creator", - }, - headers=auth_headers, - ) - # SSE endpoint returns 200 with streaming response - assert response.status_code == 200 - - -@pytest.mark.asyncio -async def test_chat_request_rejects_invalid_mode( - client: AsyncClient, auth_headers: dict -): - """Verify that an invalid mode value is rejected.""" - response = await client.post( - "/api/v1/chat/stream", - json={ - "message": "Hello", - "mode": "invalid_mode", - }, - headers=auth_headers, - ) - assert response.status_code == 422 # Pydantic validation error -``` - -- [ ] **Step 2: Run test** - -Run: `cd backend && python -m pytest tests/test_api/test_skill_creator.py -v` -Expected: PASS (requires test fixtures — adapt to existing test setup) - -- [ ] **Step 3: Commit** - -```bash -git add backend/tests/test_api/test_skill_creator.py -git commit -m "test: add integration tests for skill_creator chat mode" -``` - ---- - -## Dependency Order - -``` -Task 1 (Schema) - ↓ -Task 2 (preview_skill tool) → Task 3 (Register tool) - ↓ -Task 4 (GraphService method) - ↓ -Task 5 (Wire into chat.py) — depends on Tasks 1, 3, 4 - ↓ -Task 6 (Page layout) → Task 7 (Chat component) → Task 8 (Preview components) - ↓ -Task 9 (Entry points) - ↓ -Task 10 (Frontend streaming passthrough) — depends on Tasks 7, 5 - ↓ -Task 11 (Integration test) — depends on all above -``` - -Tasks 1-5 are backend (sequential). Tasks 6-9 are frontend (can parallelize with backend after Task 1). Task 10 bridges both. Task 11 is final verification. diff --git a/docs/superpowers/plans/2026-03-19-workspace-member-auth-fixes.md b/docs/superpowers/plans/2026-03-19-workspace-member-auth-fixes.md deleted file mode 100644 index 6960c0251..000000000 --- a/docs/superpowers/plans/2026-03-19-workspace-member-auth-fixes.md +++ /dev/null @@ -1,586 +0,0 @@ -# Workspace Member Authorization System Fixes - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Fix all critical bugs, security holes, and inconsistencies in the workspace member authorization system so that "1 workspace = 1 team" with creator-managed membership is fully self-consistent end-to-end. - -**Architecture:** Fixes are organized into 4 phases — (P1) Backend critical route/security fixes, (P2) Backend service-layer logic fixes, (P3) Frontend bug fixes, (P4) Resource access control gaps. Each phase is independently testable and committable. 
- -**Tech Stack:** Python/FastAPI/SQLAlchemy (backend), TypeScript/Next.js/React Query (frontend) - ---- - -## Phase 1: Backend Critical Route & Security Fixes - -### Task 1: Fix route ordering — invitation routes shadowed by `/{workspace_id}` - -All `GET /invitations*` routes are declared AFTER `GET /{workspace_id}`, so FastAPI matches "invitations" as a `workspace_id` UUID and returns 422. Move all invitation routes before the dynamic `/{workspace_id}` route. - -Note: `/members/{user_id}` routes (PATCH/DELETE) are NOT affected by this shadowing issue — they have a different prefix segment ("members") and don't collide with `/{workspace_id}`. Only invitation routes need to be moved. - -**Files:** -- Modify: `backend/app/api/v1/workspaces.py` - -- [ ] **Step 1: Move all `/invitations*` routes before `/{workspace_id}`** - -Move the following route blocks (currently at lines 166-263) to appear BEFORE `@router.get("/{workspace_id}")` (currently at line 89): - -```python -# --- Static invitation routes (MUST be before /{workspace_id}) --- - -@router.get("/invitations") -async def list_invitations(...): - ... - -@router.get("/invitations/pending") -async def list_pending_invitations(...): - ... - -@router.get("/invitations/all") -async def list_all_invitations(...): - ... - -@router.post("/invitations") -async def create_invitation(...): - ... - -@router.get("/invitations/{token}") -async def get_invitation_by_token(...): - ... - -@router.post("/invitations/{invitation_id}/accept") -async def accept_invitation(...): - ... - -@router.post("/invitations/{invitation_id}/reject") -async def reject_invitation(...): - ... - -@router.post("/invitations/token/{token}/accept") -async def accept_invitation_by_token(...): - ... - -# --- Dynamic workspace routes (AFTER all static paths) --- - -@router.get("/{workspace_id}") -async def get_workspace(...): - ... -``` - -The final order in the file should be: -1. `GET /` (list workspaces) -2. `POST /` (create workspace) -3. All `/invitations*` routes (GET, POST, accept, reject, token-accept) -4. `GET /{workspace_id}` and other `/{workspace_id}/*` routes -5. `/members/{user_id}` routes (can stay at end — not shadowed) - -- [ ] **Step 2: Verify by starting the server and testing** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.api.v1.workspaces import router; print([r.path for r in router.routes])"` -Expected: `/invitations` appears before `/{workspace_id}` in the route list. - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/api/v1/workspaces.py -git commit -m "fix: reorder workspace routes to prevent invitation paths being shadowed by /{workspace_id}" -``` - ---- - -### Task 2: Delete unused `require_roles()` — dead code with broken logic - -`require_roles(*roles)` accepts roles but never checks them — any authenticated user passes. Confirmed via grep: this function is NOT called anywhere in the backend. It is only defined in `dependencies.py`. Delete it. 
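-
-For contrast, a version that actually enforced its arguments would need a membership test — shown here only to make the bug concrete (a sketch; it assumes a `role` attribute on `User` and reuses the project's `ForbiddenException`):
-
-```python
-def require_roles(*roles: str):
-    async def check_roles(current_user: User = Depends(get_current_user)) -> User:
-        if current_user.is_superuser:
-            return current_user
-        # This is the check the deleted version never performed
-        if getattr(current_user, "role", None) not in roles:
-            raise ForbiddenException("Insufficient role")
-        return current_user
-
-    return Depends(check_roles)
-```
-
-Since nothing calls the function, deleting it is safer than fixing it.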
- -**Files:** -- Modify: `backend/app/common/dependencies.py:138-146` - -- [ ] **Step 1: Delete the `require_roles` function (lines 138-146)** - -Remove the entire function: - -```python -# DELETE THIS ENTIRE BLOCK: -def require_roles(*roles: str): - """角色权限检查装饰器""" - - async def check_roles(current_user: User = Depends(get_current_user)): - if current_user.is_superuser: - return current_user - return current_user - - return Depends(check_roles) -``` - -- [ ] **Step 2: Verify no import breakage** - -```bash -cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter && grep -rn "require_roles" backend/ -``` - -Expected: Only appears in the import line of the file being edited (if any), and the definition itself. After deletion, zero matches. - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/common/dependencies.py -git commit -m "fix: remove unused require_roles function that silently bypassed role checks" -``` - ---- - -### Task 3: Protect owner from being removed via `remove_member` - -An admin can currently remove the workspace owner from `workspace_members`. Add a guard identical to what exists in `update_member_role`. - -**Files:** -- Modify: `backend/app/services/workspace_service.py:921-960` - -- [ ] **Step 1: Add owner removal guard** - -Add after the `NotFoundException` guard (after `if not target_member: raise NotFoundException(...)` at line 941), before `current_role` is fetched: - -```python -# 不能移除工作空间拥有者 -if str(workspace.owner_id) == str(target_user_id): - raise BadRequestException("Cannot remove workspace owner") -``` - -- [ ] **Step 2: Commit** - -```bash -git add backend/app/services/workspace_service.py -git commit -m "fix: prevent workspace owner from being removed via remove_member" -``` - ---- - -## Phase 2: Backend Service Logic Fixes - -### Task 4: Fix repository `update_status` double-commit breaking transaction atomicity - -`WorkspaceInvitationRepository.update_status()` calls `self.db.commit()` directly, but its callers (`accept_invitation`, `reject_invitation`) also call `self.commit()`. This breaks atomicity — the invitation status update and member creation happen in separate transactions. - -**Files:** -- Modify: `backend/app/repositories/workspace.py:136-144` - -- [ ] **Step 1: Replace `commit()` with `flush()` in `update_status`** - -```python -async def update_status(self, invitation_id: uuid.UUID, status: WorkspaceInvitationStatus) -> WorkspaceInvitation: - """更新邀请状态""" - invitation = await self.get(invitation_id) - if not invitation: - raise NotFoundException("Invitation not found") - invitation.status = status - await self.db.flush() # flush, not commit — let the service layer control the transaction - await self.db.refresh(invitation) - return invitation -``` - -- [ ] **Step 2: Commit** - -```bash -git add backend/app/repositories/workspace.py -git commit -m "fix: use flush instead of commit in invitation update_status to preserve transaction atomicity" -``` - ---- - -### Task 5: Fix `accept_invitation` — mutates state then raises error for existing members - -**Dependency: Task 4 must be completed first** (Task 4 changes `update_status` from `commit()` to `flush()`, which this task relies on for correct transaction behavior). - -When a user is already a member, the code marks the invitation as `accepted` and THEN raises a 400 error. The invitation is permanently consumed for no reason. Fix: return success instead (idempotent behavior). Note: `accept_invitation_by_token` (line 673-678) calls this method internally, so it also gets the fix. 
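-
-A regression test for the new behavior could look like this (a sketch — the fixture names and `add_member` helper are assumptions to adapt to the existing test setup):
-
-```python
-@pytest.mark.asyncio
-async def test_accept_invitation_when_already_member(service, invitation, user):
-    # User is already in the workspace before accepting
-    await service.member_repo.add_member(invitation.workspace_id, user.id, "member")
-
-    result = await service.accept_invitation(invitation.id, user)
-
-    # Must succeed idempotently instead of raising a 400
-    assert result["success"] is True
-    assert "already a member" in result["message"].lower()
-```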
- -**Files:** -- Modify: `backend/app/services/workspace_service.py:620-626` - -- [ ] **Step 1: Return success instead of raising error when already a member** - -Replace lines 620-626: - -```python -# 检查用户是否已经是成员 -existing_member = await self.member_repo.get_member(invitation.workspace_id, current_user.id) -if existing_member: - # 用户已是成员 — 标记邀请为已接受并返回成功(幂等操作) - await self.invitation_repo.update_status(invitation.id, WorkspaceInvitationStatus.accepted) - await self.commit() - workspace = await self.workspace_repo.get(invitation.workspace_id) - return { - "success": True, - "workspace": await self._serialize_workspace(workspace, current_user), - "message": "You are already a member of this workspace", - } -``` - -- [ ] **Step 2: Commit** - -```bash -git add backend/app/services/workspace_service.py -git commit -m "fix: accept_invitation returns success for existing members instead of error" -``` - ---- - -### Task 6: Add duplicate invitation check in `create_invitation` - -`find_pending()` repository method exists but is never called. An admin can spam unlimited invitations to the same email. - -**Files:** -- Modify: `backend/app/services/workspace_service.py:269-310` - -- [ ] **Step 1: Add duplicate check after existing-member check (around line 295)** - -Insert after the existing-member check block: - -```python -# 检查是否已有未过期的待处理邀请 -existing_pending = await self.invitation_repo.find_pending(workspace_id, email.lower()) -if existing_pending: - if existing_pending.expires_at and existing_pending.expires_at > datetime.now(timezone.utc): - raise BadRequestException( - f"A pending invitation already exists for {email}. 该用户已有待处理的邀请" - ) -``` - -- [ ] **Step 2: Normalize email to lowercase before storing** - -Change line 302 from `"email": email,` to `"email": email.lower(),`. - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/services/workspace_service.py -git commit -m "fix: prevent duplicate invitations and normalize email to lowercase" -``` - ---- - -### Task 7: Remove dead code — `datetime.utcnow()` call and deprecated method - -**Files:** -- Modify: `backend/app/services/workspace_service.py` - -- [ ] **Step 1: Verify no callers of deprecated method exist** - -```bash -cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter && grep -rn "list_all_invitations_for_user[^_]" backend/ -``` - -Expected: Only the method definition itself appears. If any route or service calls it, do NOT delete — update the caller to use the paginated version first. - -- [ ] **Step 2: Remove orphaned `datetime.utcnow()` call at line 134** - -Delete the line `datetime.utcnow()` in `create_workspace`. - -- [ ] **Step 3: Remove deprecated `list_all_invitations_for_user` method (lines 439-478)** - -Delete the entire method that is marked `已废弃,使用分页版本`. - -- [ ] **Step 4: Commit** - -```bash -git add backend/app/services/workspace_service.py -git commit -m "chore: remove dead code (orphaned datetime call, deprecated invitation list method)" -``` - ---- - -### Task 8: Delete unused `_ensure_workspace_role` in environment.py - -The local helper in `environment.py` (lines 34-46) doesn't handle superusers or owners not in the members table. However, **the function is never called** — the actual workspace routes in this file already use `require_workspace_role` from `dependencies.py` as a FastAPI dependency. Simply delete the dead code. 
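-
-For reference, the dependency-based pattern these routes already use looks roughly like this (illustrative only — the exact `require_workspace_role` signature may differ):
-
-```python
-@router.get("/{workspace_id}/environments")
-async def list_environments(
-    workspace_id: uuid.UUID,
-    # Raises 403 for non-members; superusers are handled centrally
-    member=Depends(require_workspace_role(WorkspaceMemberRole.viewer)),
-):
-    ...
-```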
-
-**Files:**
-- Modify: `backend/app/api/v1/environment.py:34-46`
-
-- [ ] **Step 1: Delete the `_ensure_workspace_role` function and its unused import**
-
-Delete lines 34-46 (the entire function). Also remove the unused import of `WorkspaceMemberRepository` at line 24 if it's no longer used elsewhere in the file.
-
-- [ ] **Step 2: Verify no callers remain**
-
-```bash
-grep -n "_ensure_workspace_role" backend/app/api/v1/environment.py
-```
-
-Expected: Zero matches after deletion.
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/app/api/v1/environment.py
-git commit -m "chore: remove unused _ensure_workspace_role helper from environment.py"
-```
-
----
-
-## Phase 3: Frontend Bug Fixes
-
-### Task 9: Fix invitation accept page — wrong `/auth/signin` redirect
-
-The actual signin route is `/signin` (Next.js route group `(auth)/signin`), not `/auth/signin`.
-
-**Files:**
-- Modify: `frontend/app/workspace/invitations/accept/page.tsx:94,119`
-
-- [ ] **Step 1: Replace `/auth/signin` with `/signin`**
-
-Line 94:
-```typescript
-router.push(`/signin?callbackUrl=${encodeURIComponent(`/workspace/invitations/accept?token=${token}`)}`)
-```
-
-Line 119:
-```typescript
-router.push(`/signin?callbackUrl=${encodeURIComponent(`/workspace/invitations/accept?token=${token}`)}`)
-```
-
-- [ ] **Step 2: Commit**
-
-```bash
-git add frontend/app/workspace/invitations/accept/page.tsx
-git commit -m "fix: correct signin redirect path in invitation accept page (/signin not /auth/signin)"
-```
-
----
-
-### Task 10: Fix `acceptInvitation` in workspaceService — token vs ID inconsistency
-
-The accept page uses `token` but calls `acceptInvitation(token)` which hits `/invitations/{token}/accept`. The backend has TWO accept endpoints: `/invitations/{invitation_id}/accept` (by ID) and `/invitations/token/{token}/accept` (by token). The frontend service should use the token-specific endpoint when passing a token.
-
-**Files:**
-- Modify: `frontend/services/workspaceService.ts:215-219`
-
-- [ ] **Step 1: Add separate methods for accept-by-ID and accept-by-token**
-
-(Use the service's existing response type in place of `unknown`.)
-
-```typescript
-/**
- * Accept invitation by ID (for notification banner)
- */
-async acceptInvitationById(invitationId: string): Promise<unknown> {
-  return apiPost(
-    `${API_ENDPOINTS.workspaces}/invitations/${invitationId}/accept`
-  )
-},
-
-/**
- * Accept invitation by token (for email link)
- */
-async acceptInvitationByToken(token: string): Promise<unknown> {
-  return apiPost(
-    `${API_ENDPOINTS.workspaces}/invitations/token/${token}/accept`
-  )
-},
-```
-
-Keep the old `acceptInvitation` as an alias for `acceptInvitationById` for backward compat:
-
-```typescript
-async acceptInvitation(invitationId: string): Promise<unknown> {
-  return this.acceptInvitationById(invitationId)
-},
-```
-
-- [ ] **Step 2: Update the accept page to use `acceptInvitationByToken`**
-
-In `frontend/app/workspace/invitations/accept/page.tsx`, change the mutation:
-
-```typescript
-mutationFn: async () => {
-  if (!token) throw new Error('Token is required')
-  return workspaceService.acceptInvitationByToken(token)
-},
-```
-
-- [ ] **Step 3: Verify notification banner uses `acceptInvitationById` with `invitation.id`**
-
-Search for `acceptInvitation` calls in notification components and ensure they pass `invitation.id` and call `acceptInvitationById` (or the existing `acceptInvitation` alias).
- -- [ ] **Step 4: Commit** - -```bash -git add frontend/services/workspaceService.ts frontend/app/workspace/invitations/accept/page.tsx -git commit -m "fix: distinguish acceptInvitationById vs acceptInvitationByToken to fix token/ID mismatch" -``` - ---- - -### Task 11: Fix workspace switcher menu — hide admin-only items from non-admin users - -The workspace context menu in the header shows "Members Management" and "API Keys" to all roles, including viewers. - -**Files:** -- Modify: `frontend/components/sidebar/components/workspace-header/workspace-header.tsx` - -**Context:** This component does NOT currently import `useWorkspacePermissions` or `useUserPermissions`. However, each workspace object in the `workspaces` list already carries a `role` field (serialized by the backend's `_serialize_workspace`). Use this existing `role` field directly instead of adding new hook calls. - -- [ ] **Step 1: Find the context menu rendering (around lines 740-803)** - -Read the file to locate the exact menu items for "Members Management" and "API Keys", and confirm the workspace object structure includes `role`. - -- [ ] **Step 2: Gate admin-only menu items using the workspace's `role` field** - -Check the role of the active workspace and conditionally render: - -```tsx -{(activeWorkspace.role === 'owner' || activeWorkspace.role === 'admin') && ( - <> - Members Management - API Keys - -)} -``` - -- [ ] **Step 3: Commit** - -```bash -git add frontend/components/sidebar/components/workspace-header/workspace-header.tsx -git commit -m "fix: hide admin-only menu items (Members, API Keys) from non-admin users in workspace switcher" -``` - ---- - -## Phase 4: Resource Access Control Gaps - -### Task 12: Add workspace membership check to Traces API - -Currently any authenticated user can query any workspace's traces by passing an arbitrary `workspace_id`. Add workspace membership validation. - -**Files:** -- Modify: `backend/app/api/v1/traces.py:162-198` - -- [ ] **Step 1: Add workspace membership check when `workspace_id` is provided** - -In `list_traces`, after the service is created and before querying: - -```python -# 如果指定了 workspace_id,校验当前用户是否有权限访问该工作空间 -if workspace_id: - from app.services.workspace_permission import check_workspace_access - from app.models.workspace import WorkspaceMemberRole - has_access = await check_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.viewer) - if not has_access: - raise ForbiddenException("No access to workspace traces") -``` - -Add the import at the top: -```python -from app.common.exceptions import ForbiddenException -``` - -- [ ] **Step 2: Same check in `get_trace_detail` — validate workspace ownership of the trace** - -After fetching the trace, if `trace.workspace_id` is set, check membership: - -```python -if trace.workspace_id: - from app.services.workspace_permission import check_workspace_access - from app.models.workspace import WorkspaceMemberRole - has_access = await check_workspace_access(db, trace.workspace_id, current_user, WorkspaceMemberRole.viewer) - if not has_access: - raise ForbiddenException("No access to workspace traces") -``` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/api/v1/traces.py -git commit -m "fix: add workspace membership check to traces API endpoints" -``` - ---- - -### Task 13: Add workspace access check to Copilot endpoints - -The copilot history/messages endpoints under `/graphs/{graph_id}/copilot/*` never check graph or workspace access. 
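-
-A minimal test capturing the gap (a sketch — `client` and the other fixture names assume the existing integration-test setup plus a graph in a workspace the caller is not a member of):
-
-```python
-@pytest.mark.asyncio
-async def test_copilot_history_requires_workspace_membership(
-    client, non_member_auth_headers, workspace_graph_id
-):
-    resp = await client.get(
-        f"/api/v1/graphs/{workspace_graph_id}/copilot/history",
-        headers=non_member_auth_headers,
-    )
-    # Before the fix this returns 200 for any authenticated user
-    assert resp.status_code == 403
-```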
- -**Files:** -- Modify: `backend/app/api/v1/graphs.py` (copilot endpoints) - -- [ ] **Step 1: Identify the copilot endpoints in graphs.py** - -Search for `copilot` in the file to find exact line numbers. - -- [ ] **Step 2: Add workspace access check to each copilot endpoint** - -The copilot endpoints use `CopilotService`, not the graph service. To look up the graph's workspace, you need to query the graph separately. The existing `_ensure_workspace_member` helper in `graphs.py` calls `check_workspace_access` and raises `ForbiddenException` — reuse it. - -For each copilot endpoint (`GET /{graph_id}/copilot/history`, `DELETE /{graph_id}/copilot/history`, `POST /{graph_id}/copilot/messages`): - -1. Import and instantiate the graph repository to look up the graph -2. Use the existing `_ensure_workspace_member` helper - -```python -from app.repositories.graph import GraphRepository - -# At the start of the copilot endpoint: -graph_repo = GraphRepository(db) -graph = await graph_repo.get(graph_id) -if graph and graph.workspace_id: - await _ensure_workspace_member(db, graph.workspace_id, current_user, WorkspaceMemberRole.viewer) -``` - -For write operations (POST messages, DELETE history), use `WorkspaceMemberRole.member` instead of `viewer`. - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/api/v1/graphs.py -git commit -m "fix: add workspace access check to copilot history and messages endpoints" -``` - ---- - -### Task 14: Add workspace membership check to graph schema import - -`POST /schema/import` passes `workspace_id` from the request body to create a graph without checking membership. - -**Files:** -- Modify: `backend/app/api/v1/graph_schemas.py` - -- [ ] **Step 1: Read the file to find the import endpoint** - -```bash -grep -n "import" backend/app/api/v1/graph_schemas.py -``` - -- [ ] **Step 2: Add workspace membership check before creating the graph** - -```python -if workspace_id: - from app.services.workspace_permission import check_workspace_access - from app.models.workspace import WorkspaceMemberRole - has_access = await check_workspace_access(db, workspace_id, current_user, WorkspaceMemberRole.member) - if not has_access: - raise ForbiddenException("No access to import into this workspace") -``` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/api/v1/graph_schemas.py -git commit -m "fix: add workspace membership check to graph schema import endpoint" -``` - ---- - -## Summary of All Changes - -| # | File | Change | Severity | Deps | -|---|------|--------|----------|------| -| 1 | `workspaces.py` | Reorder invitation routes before `/{workspace_id}` | CRITICAL | — | -| 2 | `dependencies.py` | Delete unused `require_roles` (broken no-op) | CRITICAL | — | -| 3 | `workspace_service.py` | Prevent owner removal in `remove_member` | CRITICAL | — | -| 4 | `repositories/workspace.py` | `flush()` instead of `commit()` in `update_status` | HIGH | — | -| 5 | `workspace_service.py` | Accept-invitation idempotent for existing members | HIGH | Task 4 | -| 6 | `workspace_service.py` | Duplicate invitation check + email normalization | HIGH | — | -| 7 | `workspace_service.py` | Remove dead code (datetime call + deprecated method) | LOW | — | -| 8 | `environment.py` | Delete unused `_ensure_workspace_role` helper | MEDIUM | — | -| 9 | `accept/page.tsx` | Fix `/auth/signin` → `/signin` | CRITICAL | — | -| 10 | `workspaceService.ts` + `accept/page.tsx` | Token vs ID accept distinction | HIGH | Task 1 | -| 11 | `workspace-header.tsx` | Hide admin-only menu items via 
`workspace.role` | HIGH | — | -| 12 | `traces.py` | Add workspace membership check | CRITICAL | — | -| 13 | `graphs.py` (copilot) | Add workspace access check via graph lookup | HIGH | — | -| 14 | `graph_schemas.py` | Add workspace membership check to import | HIGH | — | diff --git a/docs/superpowers/plans/2026-03-20-chat-interaction-refactor.md b/docs/superpowers/plans/2026-03-20-chat-interaction-refactor.md deleted file mode 100644 index 54fa5cd50..000000000 --- a/docs/superpowers/plans/2026-03-20-chat-interaction-refactor.md +++ /dev/null @@ -1,1335 +0,0 @@ -# Chat Interaction Refactor Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Refactor Chat to borrow Skill Creator's proven UX patterns (human-readable tool calls, persistent preview panel, per-message actions, starter prompts) while preserving free-form conversation. - -**Architecture:** Two-phase approach. Phase 1 extracts ChatInterface's 14 useState into a useReducer + Context, splits the 683-line component into focused modules (ChatLayout, ConversationPanel, PreviewPanel, ChatProvider). Phase 2 introduces shared components (ToolCallDisplay, ActionBar, StarterPrompts) and converts the overlay-based side panels into a ResizablePanel column. - -**Tech Stack:** React 18, Next.js App Router, TypeScript, Tailwind CSS, shadcn/ui ResizablePanel, TanStack Query - -**Spec:** `docs/superpowers/specs/2026-03-20-chat-interaction-refactor-design.md` - ---- - -## File Structure - -All paths relative to `frontend/app/`. - -**Phase 1 — New files:** -| File | Responsibility | -|------|---------------| -| `chat/hooks/useChatReducer.ts` | ChatState/ChatAction types, reducer function, initial state | -| `chat/ChatProvider.tsx` | ChatStateContext + ChatStreamContext providers | -| `chat/ChatLayout.tsx` | Three-column ResizablePanelGroup skeleton | -| `chat/conversation/ConversationPanel.tsx` | Message list + streaming + input container | -| `chat/conversation/MessageList.tsx` | Message rendering (from ThreadContent) | -| `chat/conversation/MessageBubble.tsx` | Single message (from MessageItem) | -| `chat/conversation/ChatInput.tsx` | Input (moved from components/) | -| `chat/conversation/index.ts` | Barrel export | -| `chat/preview/PreviewPanel.tsx` | Tab-based preview container | -| `chat/preview/FileTreePreview.tsx` | File tree + code viewer (from ArtifactPanel) | -| `chat/preview/index.ts` | Barrel export | -| `chat/hooks/usePreviewTrigger.ts` | Auto-expand/collapse logic | -| `chat/types.ts` | Extended with FileTreeEntry, NodeLogEntry, MessageMetadata | - -**Phase 1 — Modified files:** -| File | Change | -|------|--------| -| `chat/ChatInterface.tsx` | Rename to ChatPage.tsx, reduce to <50 lines | -| `chat/page.tsx` | Update import from ChatInterface to ChatPage | -| `chat/hooks/useBackendChatStream.ts` | Accept dispatch instead of setMessages | - -**Phase 1 — Deleted files (Phase 2 Step 2):** -| File | Replaced by | -|------|------------| -| `chat/components/ArtifactsDrawer.tsx` | preview/PreviewPanel | -| `chat/components/CompactArtifactStatus.tsx` | PreviewPanel auto-expand | -| `chat/components/ToolExecutionPanel.tsx` | preview/PreviewPanel tool tab | -| `chat/components/CompactToolStatus.tsx` | shared/ToolCallDisplay | - -**Phase 2 — New files:** -| File | Responsibility | -|------|---------------| -| 
`shared/ToolCallDisplay/toolDisplayRegistry.ts` | Tool name → label registry | -| `shared/ToolCallDisplay/ToolCallBadge.tsx` | Compact inline tool display | -| `shared/ToolCallDisplay/ToolCallDetail.tsx` | Expanded tool detail view | -| `shared/ToolCallDisplay/index.ts` | Barrel export | -| `shared/ActionBar/ActionBar.tsx` | Per-message action container | -| `shared/ActionBar/actions/CopyAction.tsx` | Copy message content | -| `shared/ActionBar/actions/RetryAction.tsx` | Retry message | -| `shared/ActionBar/index.ts` | Barrel export | -| `shared/StarterPrompts/StarterPrompts.tsx` | Guided start chips | -| `shared/StarterPrompts/index.ts` | Barrel export | - ---- - -## Phase 1: Architecture Cleanup - -### Task 1: Extend types.ts with typed metadata - -**Files:** -- Modify: `chat/types.ts` (71 lines) - -- [ ] **Step 1: Add FileTreeEntry, NodeLogEntry, MessageMetadata types** - -Add above the existing `Message` interface at line 25: - -```typescript -export interface FileTreeEntry { - action: string - size?: number - timestamp?: number -} - -export interface NodeLogEntry { - type: 'command' | 'route_decision' | 'loop_iteration' | 'parallel_task' | 'state_update' | 'node_transition' - nodeName: string - timestamp: number - data?: Record -} - -export interface MessageMetadata { - fileTree?: Record - nodeExecutionLog?: NodeLogEntry[] - currentNode?: string - lastNode?: string - lastRunId?: string - lastUpdate?: number - lastRouteDecision?: any - lastLoopIteration?: any - [key: string]: any // keep backwards compat -} -``` - -Update the `Message` interface to use `MessageMetadata`: - -```typescript -export interface Message { - // ... existing fields unchanged - metadata?: MessageMetadata -} -``` - -- [ ] **Step 2: Verify TypeScript compiles** - -Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head -30` -Expected: No new errors (existing errors may remain) - -- [ ] **Step 3: Commit** - -```bash -git add frontend/app/chat/types.ts -git commit -m "refactor(chat): add typed FileTreeEntry, NodeLogEntry, MessageMetadata to types.ts" -``` - ---- - -### Task 2: Create useChatReducer and ChatProvider - -**Files:** -- Create: `chat/hooks/useChatReducer.ts` -- Create: `chat/ChatProvider.tsx` - -- [ ] **Step 1: Create useChatReducer.ts** - -```typescript -'use client' - -import { useReducer } from 'react' -import type { Message, ToolCall, FileTreeEntry, NodeLogEntry } from '../types' - -// ─── State ─────────────────────────────────────────────────────────────────── -export interface ChatState { - messages: Message[] - threadId: string | null - input: string - streaming: { - isProcessing: boolean - isSubmitting: boolean - text: string - nodeExecutionLog: NodeLogEntry[] - } - preview: { - visible: boolean - fileTree: Record - activeFile: string | null - userDismissed: boolean - } - ui: { - sidebarVisible: boolean - selectedTool: ToolCall | null - showNoDefaultModelNotice: boolean - } - mode: { - currentMode: string | undefined - currentGraphId: string | null - hasShownApkPrompt: boolean - } -} - -export const initialChatState: ChatState = { - messages: [], - threadId: null, - input: '', - streaming: { - isProcessing: false, - isSubmitting: false, - text: '', - nodeExecutionLog: [], - }, - preview: { - visible: false, - fileTree: {}, - activeFile: null, - userDismissed: false, - }, - ui: { - sidebarVisible: false, - selectedTool: null, - showNoDefaultModelNotice: false, - }, - mode: { - currentMode: undefined, - currentGraphId: null, - hasShownApkPrompt: false, - }, -} - -// ─── Actions 
───────────────────────────────────────────────────────────────── -export type ChatAction = - // Thread - | { type: 'SET_THREAD'; threadId: string } - | { type: 'RESET' } - // Messages - | { type: 'APPEND_MESSAGE'; message: Message } - | { type: 'UPDATE_MESSAGE'; id: string; patch: Partial } - | { type: 'SET_MESSAGES'; messages: Message[] } - // Input - | { type: 'SET_INPUT'; value: string } - // Streaming lifecycle - | { type: 'STREAM_START' } - | { type: 'STREAM_CONTENT'; delta: string } - | { type: 'STREAM_DONE' } - | { type: 'STREAM_ERROR'; error: string } - // File & tool events - | { type: 'FILE_EVENT'; path: string; info: FileTreeEntry } - | { type: 'TOOL_START'; tool: ToolCall } - | { type: 'TOOL_END'; id: string; result: string } - // Node execution (agent mode) - | { type: 'NODE_START'; nodeId: string; label: string } - | { type: 'NODE_END'; nodeId: string } - | { type: 'NODE_LOG'; entry: NodeLogEntry } - // UI - | { type: 'TOGGLE_SIDEBAR' } - | { type: 'SHOW_PREVIEW'; tab?: string } - | { type: 'HIDE_PREVIEW' } - | { type: 'SELECT_TOOL'; tool: ToolCall | null } - | { type: 'DISMISS_MODEL_NOTICE' } - | { type: 'SHOW_MODEL_NOTICE' } - // Mode - | { type: 'SET_MODE'; mode: string; graphId: string | null } - | { type: 'SET_APK_PROMPT_SHOWN' } - -// ─── Reducer ───────────────────────────────────────────────────────────────── -export function chatReducer(state: ChatState, action: ChatAction): ChatState { - switch (action.type) { - case 'SET_THREAD': - return { ...state, threadId: action.threadId } - - case 'RESET': - return { - ...initialChatState, - ui: { ...initialChatState.ui, sidebarVisible: state.ui.sidebarVisible }, - } - - case 'APPEND_MESSAGE': - return { ...state, messages: [...state.messages, action.message] } - - case 'UPDATE_MESSAGE': - return { - ...state, - messages: state.messages.map((m) => - m.id === action.id ? { ...m, ...action.patch } : m - ), - } - - case 'SET_MESSAGES': - return { ...state, messages: action.messages } - - case 'SET_INPUT': - return { ...state, input: action.value } - - case 'STREAM_START': - return { - ...state, - streaming: { - ...state.streaming, - isProcessing: true, - isSubmitting: false, - text: '', - nodeExecutionLog: [], - }, - preview: { ...state.preview, userDismissed: false }, - } - - case 'STREAM_CONTENT': { - const lastMsg = state.messages[state.messages.length - 1] - if (!lastMsg || lastMsg.role !== 'assistant') return state - return { - ...state, - streaming: { ...state.streaming, text: (lastMsg.content ?? '') + action.delta }, - } - } - - case 'STREAM_DONE': - return { - ...state, - streaming: { ...state.streaming, isProcessing: false, isSubmitting: false, text: '' }, - } - - case 'STREAM_ERROR': - return { - ...state, - streaming: { ...state.streaming, isProcessing: false, isSubmitting: false }, - } - - case 'FILE_EVENT': - return { - ...state, - preview: { - ...state.preview, - fileTree: { ...state.preview.fileTree, [action.path]: action.info }, - }, - } - - case 'TOOL_START': - return { - ...state, - messages: state.messages.map((m, i) => - i === state.messages.length - 1 - ? { ...m, tool_calls: [...(m.tool_calls ?? []), action.tool] } - : m - ), - } - - case 'TOOL_END': - return { - ...state, - messages: state.messages.map((m, i) => - i === state.messages.length - 1 - ? { - ...m, - tool_calls: m.tool_calls?.map((tc) => - tc.id === action.id ? 
{ ...tc, status: 'completed' as const, result: action.result, endTime: Date.now() } : tc - ), - } - : m - ), - } - - case 'NODE_START': - return { - ...state, - messages: state.messages.map((m, i) => - i === state.messages.length - 1 - ? { ...m, metadata: { ...m.metadata, currentNode: action.label } } - : m - ), - } - - case 'NODE_END': - return state // currentNode cleared on next NODE_START or STREAM_DONE - - case 'NODE_LOG': - return { - ...state, - streaming: { - ...state.streaming, - nodeExecutionLog: [...state.streaming.nodeExecutionLog, action.entry], - }, - } - - case 'TOGGLE_SIDEBAR': - return { ...state, ui: { ...state.ui, sidebarVisible: !state.ui.sidebarVisible } } - - case 'SHOW_PREVIEW': - return { ...state, preview: { ...state.preview, visible: true } } - - case 'HIDE_PREVIEW': - return { ...state, preview: { ...state.preview, visible: false, userDismissed: true } } - - case 'SELECT_TOOL': - return { ...state, ui: { ...state.ui, selectedTool: action.tool } } - - case 'DISMISS_MODEL_NOTICE': - return { ...state, ui: { ...state.ui, showNoDefaultModelNotice: false } } - - case 'SHOW_MODEL_NOTICE': - return { ...state, ui: { ...state.ui, showNoDefaultModelNotice: true } } - - case 'SET_MODE': - return { - ...state, - mode: { ...state.mode, currentMode: action.mode, currentGraphId: action.graphId }, - } - - case 'SET_APK_PROMPT_SHOWN': - return { ...state, mode: { ...state.mode, hasShownApkPrompt: true } } - - default: - return state - } -} - -export function useChatReducer(initialState?: Partial) { - return useReducer(chatReducer, { ...initialChatState, ...initialState }) -} -``` - -- [ ] **Step 2: Create ChatProvider.tsx** - -```typescript -'use client' - -import React, { createContext, useContext, useMemo } from 'react' -import type { ChatState, ChatAction } from './hooks/useChatReducer' -import { useChatReducer, initialChatState } from './hooks/useChatReducer' - -// Low-frequency context: messages, UI, mode -interface ChatStateContextValue { - state: ChatState - dispatch: React.Dispatch -} - -// High-frequency context: streaming text, isProcessing -interface ChatStreamContextValue { - text: string - isProcessing: boolean - isSubmitting: boolean -} - -const ChatStateContext = createContext(null) -const ChatStreamContext = createContext(null) - -export function ChatProvider({ children }: { children: React.ReactNode }) { - const [state, dispatch] = useChatReducer() - - const stateValue = useMemo( - () => ({ state, dispatch }), - // Exclude streaming fields from dependency to avoid re-rendering state consumers on every delta - // eslint-disable-next-line react-hooks/exhaustive-deps - [state.messages, state.threadId, state.input, state.preview, state.ui, state.mode, dispatch] - ) - - const streamValue = useMemo( - () => ({ - text: state.streaming.text, - isProcessing: state.streaming.isProcessing, - isSubmitting: state.streaming.isSubmitting, - }), - [state.streaming.text, state.streaming.isProcessing, state.streaming.isSubmitting] - ) - - return ( - - - {children} - - - ) -} - -export function useChatState() { - const ctx = useContext(ChatStateContext) - if (!ctx) throw new Error('useChatState must be used within ChatProvider') - return ctx -} - -export function useChatStream() { - const ctx = useContext(ChatStreamContext) - if (!ctx) throw new Error('useChatStream must be used within ChatProvider') - return ctx -} - -export { ChatStateContext, ChatStreamContext } -``` - -- [ ] **Step 3: Verify TypeScript compiles** - -Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head 
-30` -Expected: No new errors - -- [ ] **Step 4: Commit** - -```bash -git add frontend/app/chat/hooks/useChatReducer.ts frontend/app/chat/ChatProvider.tsx -git commit -m "refactor(chat): add useChatReducer and ChatProvider with dual-context split" -``` - ---- - -### Task 3: Adapt useBackendChatStream to dispatch - -**Files:** -- Modify: `chat/hooks/useBackendChatStream.ts` (549 lines) - -This is the most critical migration step. The hook must accept `dispatch` instead of `setMessages` and convert every SSE event handler from `safeSetMessages(prev => ...)` to `dispatch({ type: ... })`. - -- [ ] **Step 1: Change hook signature** - -Replace the function signature (line 26): - -```typescript -// Before: -export const useBackendChatStream = ( - setMessages: React.Dispatch>, -) - -// After: -export const useBackendChatStream = ( - dispatch: React.Dispatch, -) -``` - -Add import at top: -```typescript -import type { ChatAction } from './useChatReducer' -``` - -- [ ] **Step 2: Replace all safeSetMessages calls with dispatch calls** - -Key transformations (referencing actual event handlers in the file): - -| SSE Event | Current pattern | New pattern | -|-----------|----------------|-------------| -| Before stream (blank assistant msg) | `setMessages(prev => [...prev, blankMsg])` | `dispatch({ type: 'APPEND_MESSAGE', message: blankMsg })` | -| `content` | `safeSetMessages(prev => prev.map(...update last msg content))` | `dispatch({ type: 'UPDATE_MESSAGE', id: lastMsgId, patch: { content: accumulated } })` | -| `tool_start` | `safeSetMessages(prev => prev.map(...push tool to last msg))` | `dispatch({ type: 'TOOL_START', tool: { id, name, args, status: 'running', startTime } })` | -| `tool_end` | `safeSetMessages(prev => prev.map(...update tool status))` | `dispatch({ type: 'TOOL_END', id, result })` | -| `file_event` | `safeSetMessages(prev => prev.map(...update metadata.fileTree))` | `dispatch({ type: 'FILE_EVENT', path, info: { action, size, timestamp } })` | -| `node_start` | `safeSetMessages(prev => prev.map(...set metadata.currentNode))` | `dispatch({ type: 'NODE_START', nodeId, label })` | -| `node_end` | `safeSetMessages(prev => prev.map(...clear currentNode))` | `dispatch({ type: 'NODE_END', nodeId })` | -| `command`/`route_decision`/etc. | `nodeExecutionLog.push(...); safeSetMessages(...)` | `dispatch({ type: 'NODE_LOG', entry: { type, nodeName, timestamp, data } })` | -| `error` | `safeSetMessages(prev => prev.map(...append error))` | `dispatch({ type: 'STREAM_ERROR', error: text })` | -| `done` | `safeSetMessages(prev => prev.map(...set isStreaming false))` | `dispatch({ type: 'STREAM_DONE' })` | -| `thread_id` | `threadIdRef.current = id` | Keep ref + `dispatch({ type: 'SET_THREAD', threadId: id })` | - -Remove the `nodeExecutionLog` closure variable — it is now in the reducer state. - -Remove `safeSetMessages` wrapper — dispatch is already safe to call after unmount (no-op on unmounted reducer). - -Keep `abortRef` and `isMountedRef` for abort logic. - -- [ ] **Step 3: Update sendMessage to dispatch STREAM_START** - -At the beginning of `sendMessage`, before creating the blank assistant message: -```typescript -dispatch({ type: 'STREAM_START' }) -``` - -- [ ] **Step 4: Keep the lastMsgId tracking** - -The hook needs to track the ID of the current assistant message being streamed. 
-
-- [ ] **Step 4: Keep the lastMsgId tracking**
-
-The hook needs to track the ID of the current assistant message being streamed. Use a ref:
-```typescript
-const currentMsgIdRef = useRef<string>('')
-// Set when creating blank assistant message
-currentMsgIdRef.current = blankMsg.id
-// Use in UPDATE_MESSAGE dispatches
-dispatch({ type: 'UPDATE_MESSAGE', id: currentMsgIdRef.current, patch: { ... } })
-```
-
-- [ ] **Step 5: Verify TypeScript compiles**
-
-Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head -30`
-Expected: errors in ChatInterface.tsx (it still passes setMessages); no errors in useBackendChatStream.ts itself.
-
-- [ ] **Step 6: Commit**
-
-```bash
-git add frontend/app/chat/hooks/useBackendChatStream.ts
-git commit -m "refactor(chat): adapt useBackendChatStream to dispatch-based state management"
-```
-
----
-
-### Task 4: Create ConversationPanel and move components
-
-**Files:**
-- Create: `chat/conversation/ConversationPanel.tsx`
-- Create: `chat/conversation/MessageList.tsx` (copy from `chat/components/ThreadContent.tsx`)
-- Create: `chat/conversation/MessageBubble.tsx` (copy from `chat/components/MessageItem.tsx`)
-- Move: `chat/components/ChatInput.tsx` → `chat/conversation/ChatInput.tsx`
-- Create: `chat/conversation/index.ts`
-
-- [ ] **Step 1: Copy ThreadContent.tsx to MessageList.tsx**
-
-Copy `chat/components/ThreadContent.tsx` to `chat/conversation/MessageList.tsx`. Update:
-- Rename component from `ThreadContent` to `MessageList`
-- Update import of `MessageItem` to `MessageBubble` from `./MessageBubble`
-- The props interface stays the same for now (will consume Context in Task 6)
-
-- [ ] **Step 2: Copy MessageItem.tsx to MessageBubble.tsx**
-
-Copy `chat/components/MessageItem.tsx` to `chat/conversation/MessageBubble.tsx`. Update:
-- Rename component from `MessageItem` to `MessageBubble`
-- Rename `MessageItemProps` to `MessageBubbleProps`
-- Update import paths for types (now `../../types` instead of `../types`)
-
-- [ ] **Step 3: Move ChatInput.tsx**
-
-Copy `chat/components/ChatInput.tsx` to `chat/conversation/ChatInput.tsx`. Update import paths:
-- `../hooks/useFileUpload` → `../../hooks/useFileUpload` (or from hooks directory)
-- `../services/modeHandlers/types` → `../../services/modeHandlers/types`
-
-- [ ] **Step 4: Create ConversationPanel.tsx**
-
-This component replaces the middle section of ChatInterface (lines 561-616). It consumes ChatStateContext:
-
-```typescript
-'use client'
-
-import React, { useRef, useEffect, useMemo } from 'react'
-import { useChatState, useChatStream } from '../ChatProvider'
-import MessageList from './MessageList'
-import ChatInput from './ChatInput'
-import type { ToolCall } from '../types'
-
-interface ConversationPanelProps {
-  onSubmit: (text: string, mode?: string, graphId?: string | null, files?: any[]) => void
-  onStop: () => void
-  onToolClick: (toolCall: ToolCall) => void
-}
-
-export default function ConversationPanel({ onSubmit, onStop, onToolClick }: ConversationPanelProps) {
-  const { state, dispatch } = useChatState()
-  const stream = useChatStream()
-  const scrollRef = useRef<HTMLDivElement>(null)
-
-  // Auto-scroll: the double requestAnimationFrame defers the scrollHeight
-  // measurement until after the DOM commit for the new message content
-  useEffect(() => {
-    const id = requestAnimationFrame(() => {
-      requestAnimationFrame(() => {
-        if (scrollRef.current) {
-          scrollRef.current.scrollTop = scrollRef.current.scrollHeight
-        }
-      })
-    })
-    return () => cancelAnimationFrame(id)
-  }, [state.messages, stream.isProcessing])
-
-  const agentStatus = useMemo<'idle' | 'running' | 'connecting' | 'error'>(
-    () => (stream.isProcessing || stream.isSubmitting ? 'running' : 'idle'),
-    [stream.isProcessing, stream.isSubmitting],
-  )
-
-  const lastMsg = state.messages[state.messages.length - 1]
-  const streamingText = useMemo(() => {
-    if (!lastMsg || lastMsg.role !== 'assistant') return ''
-    if (!stream.isProcessing && !lastMsg.isStreaming) return ''
-    return lastMsg.content ?? ''
-  }, [lastMsg, stream.isProcessing])
-
-  const currentNodeLabel = useMemo(
-    () => (lastMsg?.role === 'assistant' ? (lastMsg.metadata?.currentNode ?? undefined) : undefined),
-    [lastMsg],
-  )
-
-  return (
-    <div className="flex h-full flex-col">
-      <div ref={scrollRef} className="flex-1 overflow-y-auto">
-        <MessageList
-          messages={state.messages}
-          agentStatus={agentStatus}
-          streamingText={streamingText}
-          currentNodeLabel={currentNodeLabel}
-          onToolClick={onToolClick}
-        />
-      </div>
-      <div className="shrink-0">
-        <ChatInput
-          value={state.input}
-          onChange={(v) => dispatch({ type: 'SET_INPUT', value: v })}
-          onSubmit={onSubmit}
-          isProcessing={stream.isProcessing}
-          onStop={onStop}
-          currentMode={state.mode.currentMode}
-          currentGraphId={state.mode.currentGraphId}
-        />
-      </div>
-    </div>
-  )
-}
-```
-
-- [ ] **Step 5: Create barrel export**
-
-```typescript
-// chat/conversation/index.ts
-export { default as ConversationPanel } from './ConversationPanel'
-export { default as MessageList } from './MessageList'
-export { default as MessageBubble } from './MessageBubble'
-export { default as ChatInput } from './ChatInput'
-```
-
-- [ ] **Step 6: Verify TypeScript compiles**
-
-Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head -30`
-
-- [ ] **Step 7: Commit**
-
-```bash
-git add frontend/app/chat/conversation/
-git commit -m "refactor(chat): create conversation/ module with ConversationPanel, MessageList, MessageBubble"
-```
-
----
-
-### Task 5: Create PreviewPanel and FileTreePreview
-
-**Files:**
-- Create: `chat/preview/PreviewPanel.tsx`
-- Create: `chat/preview/FileTreePreview.tsx` (from ArtifactPanel)
-- Create: `chat/preview/index.ts`
-- Create: `chat/hooks/usePreviewTrigger.ts`
-
-- [ ] **Step 1: Copy ArtifactPanel.tsx to FileTreePreview.tsx**
-
-Copy `chat/components/ArtifactPanel.tsx` (198 lines) to `chat/preview/FileTreePreview.tsx`. Update:
-- Rename component from `ArtifactPanel` to `FileTreePreview`
-- Update import paths
-
-- [ ] **Step 2: Create PreviewPanel.tsx**
-
-Tab-based container wrapping FileTreePreview and tool detail:
-
-```typescript
-'use client'
-
-import React, { useState } from 'react'
-import { X, FolderTree, Wrench } from 'lucide-react'
-import { useChatState } from '../ChatProvider'
-import FileTreePreview from './FileTreePreview'
-import type { ToolCall } from '../types'
-
-export default function PreviewPanel() {
-  const { state, dispatch } = useChatState()
-  const { preview, ui, threadId } = state
-  const [activeTab, setActiveTab] = useState<'files' | 'tool'>('files')
-
-  const hasFiles = Object.keys(preview.fileTree).length > 0
-  const hasTool = !!ui.selectedTool
-
-  return (
-    <div className="flex h-full flex-col">
-      {/* Header with tabs */}
-      <div className="flex items-center justify-between border-b px-2 py-1">
-        <div className="flex gap-1">
-          {hasFiles && (
-            <button onClick={() => setActiveTab('files')}>
-              <FolderTree size={14} />
-              Files
-            </button>
-          )}
-          {hasTool && (
-            <button onClick={() => setActiveTab('tool')}>
-              <Wrench size={14} />
-              Tool
-            </button>
-          )}
-        </div>
-        <button onClick={() => dispatch({ type: 'HIDE_PREVIEW' })} aria-label="Close preview">
-          <X size={14} />
-        </button>
-      </div>
-
-      {/* Content */}
-      <div className="flex-1 overflow-auto">
-        {activeTab === 'files' && threadId && (
-          <FileTreePreview threadId={threadId} fileTree={preview.fileTree} />
-        )}
-        {activeTab === 'tool' && ui.selectedTool && (
-          <div className="p-2">
-            {/* Inline tool detail — simplified from ToolExecutionPanel for now */}
-            <pre className="text-xs">{JSON.stringify(ui.selectedTool, null, 2)}</pre>
-          </div>
-        )}
-      </div>
-    </div>
-  )
-}
-```
-
-- [ ] **Step 3: Create usePreviewTrigger.ts**
-
-```typescript
-'use client'
-
-import { useEffect, useRef } from 'react'
-import type { Dispatch } from 'react'
-import type { ChatState, ChatAction } from './useChatReducer'
-
-export function usePreviewTrigger(
-  state: ChatState,
-  dispatch: Dispatch<ChatAction>,
-) {
-  const prevFileCountRef = useRef(0)
-
-  useEffect(() => {
-    const currentFileCount = Object.keys(state.preview.fileTree).length
-    const isNew = currentFileCount > prevFileCountRef.current
-
-    // Auto-show when new files appear (unless user dismissed)
-    if (isNew && currentFileCount > 0 && !state.preview.visible && !state.preview.userDismissed) {
-      dispatch({ type: 'SHOW_PREVIEW', tab: 'files' })
-    }
-
-    prevFileCountRef.current = currentFileCount
-  }, [state.preview.fileTree, state.preview.visible, state.preview.userDismissed, dispatch])
-
-  // Auto-hide when stream ends with no files
-  useEffect(() => {
-    if (
-      !state.streaming.isProcessing &&
-      Object.keys(state.preview.fileTree).length === 0 &&
-      state.preview.visible
-    ) {
-      dispatch({ type: 'HIDE_PREVIEW' })
-    }
-  }, [state.streaming.isProcessing, state.preview.fileTree, state.preview.visible, dispatch])
-}
-```
-
-- [ ] **Step 4: Create barrel export**
-
-```typescript
-// chat/preview/index.ts
-export { default as PreviewPanel } from './PreviewPanel'
-export { default as FileTreePreview } from './FileTreePreview'
-```
-
-- [ ] **Step 5: Verify TypeScript compiles**
-
-Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head -30`
-
-- [ ] **Step 6: Commit**
-
-```bash
-git add frontend/app/chat/preview/ frontend/app/chat/hooks/usePreviewTrigger.ts
-git commit -m "refactor(chat): create preview/ module with PreviewPanel, FileTreePreview, usePreviewTrigger"
-```
-
----
-
-### Task 6: Create ChatLayout and wire everything together
-
-**Files:**
-- Create: `chat/ChatLayout.tsx`
-- Modify: `chat/ChatInterface.tsx` → rename to `chat/ChatPage.tsx`
-- Modify: `chat/page.tsx`
-
-- [ ] **Step 1: Create ChatLayout.tsx**
-
-```typescript
-'use client'
-
-import React, { useEffect, useCallback } from 'react'
-import { useRouter } from 'next/navigation'
-import { ResizablePanelGroup, ResizablePanel, ResizableHandle } from '@/components/ui/resizable'
-import { useDeployedGraphs, useWorkspaces } from '@/hooks/queries'
-import { useAvailableModels } from '@/hooks/queries/models'
-import { useTranslation } from '@/lib/i18n'
-import { conversationService } from '@/services/conversationService'
-
-import ChatSidebar from './components/ChatSidebar'
-import ChatHome from './components/ChatHome'
-import { ConversationPanel } from './conversation'
-import { PreviewPanel } from './preview'
-import { useChatState, useChatStream } from './ChatProvider'
-import { useBackendChatStream } from './hooks/useBackendChatStream'
-import { usePreviewTrigger } from './hooks/usePreviewTrigger'
-import { graphResolutionService } from './services/graphResolutionService'
-import { generateId, Message } from './types'
-import type { ToolCall } from './types'
-// Import model notice dialog (extracted from old ChatInterface)
-import { ModelNoticeDialog } from './components/ModelNoticeDialog'
-
-export default function ChatLayout({ chatId: propChatId }: { chatId?: string | null }) {
-  const { state, dispatch } = useChatState()
-  const stream = useChatStream()
-  const { t } = useTranslation()
-  const router = useRouter()
-
-  // Data fetching
-  const { data: deployedAgents = [] } = useDeployedGraphs()
-  const { data: workspacesData } = useWorkspaces()
-  const personalWorkspaceId =
-    workspacesData?.find((w) => w.type === 'personal')?.id ?? null
-  const { data: availableModels = [], isSuccess: modelsLoaded, isError: modelsError } = useAvailableModels('chat', { enabled: true })
-
-  // Hook integrations
-  const { sendMessage, stopMessage } = useBackendChatStream(dispatch)
-  usePreviewTrigger(state, dispatch)
-
-  // Keyboard shortcut: Cmd+B toggle sidebar
-  useEffect(() => {
-    const handleKeyDown = (e: KeyboardEvent) => {
-      if ((e.metaKey || e.ctrlKey) && e.key === 'b') {
-        e.preventDefault()
-        dispatch({ type: 'TOGGLE_SIDEBAR' })
-      }
-    }
-    window.addEventListener('keydown', handleKeyDown)
-    return () => window.removeEventListener('keydown', handleKeyDown)
-  }, [dispatch])
-
-  // Model notice check
-  // ... (migrate useEffect from ChatInterface lines 86-103)
-
-  // Sync propChatId
-  // ... (migrate useEffect from ChatInterface lines 172-190)
-
-  // handleSubmit, handleSelectConversation, handleNewChat, handleToolClick
-  // ... (migrate from ChatInterface lines 244-430, using dispatch instead of setState)
-
-  const hasMessages = state.messages.length > 0 || !!state.threadId || !!propChatId
-
-  return (
-    <div className="h-screen w-full">
-      <ResizablePanelGroup direction="horizontal">
-        {/* Sidebar */}
-        {state.ui.sidebarVisible && (
-          <>
-            <ResizablePanel defaultSize={18} minSize={12} maxSize={30}>
-              <ChatSidebar
-                onClose={() => dispatch({ type: 'TOGGLE_SIDEBAR' })}
-                onSelectConversation={handleSelectConversation}
-                currentThreadId={state.threadId}
-                onNewChat={handleNewChat}
-              />
-            </ResizablePanel>
-            <ResizableHandle />
-          </>
-        )}
-
-        {/* Conversation */}
-        <ResizablePanel>
-          {!hasMessages ? (
-            <ChatHome
-              onSubmit={handleSubmit}
-              isProcessing={stream.isProcessing}
-              onStop={() => stopMessage(state.threadId)}
-            />
-          ) : (
-            <ConversationPanel
-              onSubmit={handleSubmit}
-              onStop={() => stopMessage(state.threadId)}
-              onToolClick={handleToolClick}
-            />
-          )}
-        </ResizablePanel>
-
-        {/* Preview */}
-        {state.preview.visible && (
-          <>
-            <ResizableHandle />
-            <ResizablePanel defaultSize={35} minSize={20}>
-              <PreviewPanel />
-            </ResizablePanel>
-          </>
-        )}
-      </ResizablePanelGroup>
-
-      <ModelNoticeDialog />
-    </div>
-  )
-}
-```
-
-Note: The `handleSubmit`, `handleSelectConversation`, `handleNewChat`, `handleToolClick` callbacks are migrated directly from ChatInterface lines 244-430, replacing all `setXxx()` calls with `dispatch({ type: 'XXX', ... })`.
-
-- [ ] **Step 2: Rename ChatInterface.tsx to ChatPage.tsx**
-
-Replace entire content with:
-
-```typescript
-'use client'
-
-import { ChatProvider } from './ChatProvider'
-import ChatLayout from './ChatLayout'
-
-interface ChatPageProps {
-  chatId?: string | null
-}
-
-export default function ChatPage({ chatId }: ChatPageProps) {
-  return (
-    <ChatProvider>
-      <ChatLayout chatId={chatId} />
-    </ChatProvider>
-  )
-}
-```
-
-- [ ] **Step 3: Update page.tsx import**
-
-In `chat/page.tsx` (line 5), change:
-```typescript
-// Before:
-import ChatInterface from './ChatInterface'
-// After:
-import ChatPage from './ChatPage'
-```
-
-And update the JSX (line 15):
-```typescript
-// Before:
-<ChatInterface chatId={chatId} />
-// After:
-<ChatPage chatId={chatId} />
-```
-
-- [ ] **Step 4: Extract ModelNoticeDialog**
-
-Create `chat/components/ModelNoticeDialog.tsx` containing the AlertDialog from ChatInterface lines 648-679. This component consumes ChatStateContext for `showNoDefaultModelNotice` and dispatches `DISMISS_MODEL_NOTICE`.
-
-- [ ] **Step 5: Verify app compiles and runs**
-
-Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head -30`
-Run: `cd frontend && npm run dev` — verify chat page loads, can send messages, sidebar works, conversation switching works.
-
-- [ ] **Step 6: Commit**
-
-```bash
-git add frontend/app/chat/ChatLayout.tsx frontend/app/chat/ChatPage.tsx frontend/app/chat/page.tsx frontend/app/chat/components/ModelNoticeDialog.tsx
-git rm frontend/app/chat/ChatInterface.tsx
-git commit -m "refactor(chat): replace ChatInterface with ChatProvider + ChatLayout + ChatPage (<50 lines)"
-```
-
----
-
-## Phase 2: Interaction Enhancement
-
-### Task 7: Shared ToolCallDisplay components
-
-**Files:**
-- Create: `shared/ToolCallDisplay/toolDisplayRegistry.ts`
-- Create: `shared/ToolCallDisplay/ToolCallBadge.tsx`
-- Create: `shared/ToolCallDisplay/ToolCallDetail.tsx`
-- Create: `shared/ToolCallDisplay/index.ts`
-
-- [ ] **Step 1: Create toolDisplayRegistry.ts**
-
-Migrate and generalize from `skills/creator/components/toolDisplayUtils.ts` (140 lines). Include all tool name mappings from that file plus the icon logic from `MessageItem.tsx`'s `ToolCallItem`.
-
-```typescript
-export interface ToolDisplayConfig {
-  label: string
-  icon?: string // lucide icon name
-  formatArgs?: (args: Record<string, any>) => string
-  formatDetail?: (args: Record<string, any>) => string
-  category: 'file' | 'code' | 'search' | 'network' | 'other'
-}
-
-const registry = new Map<string, ToolDisplayConfig>()
-
-// Register all known tools (from toolDisplayUtils.ts)
-function registerDefaults() {
-  register('read_file', { label: 'Reading', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('read', { label: 'Reading', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('write_file', { label: 'Writing', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('write', { label: 'Writing', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('create_file', { label: 'Creating', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('edit_file', { label: 'Editing', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('edit', { label: 'Editing', category: 'file', formatArgs: a => shortenPath(a.path || a.file_path) })
-  register('str_replace_editor', { label: 'Editing', category: 'file', formatArgs: a => shortenPath(a.path) })
-  register('execute', { label: 'Executing', category: 'code', formatArgs: a => truncate(a.command, 60) })
-  register('bash', { label: 'Running', category: 'code', formatArgs: a => truncate(a.command, 60) })
-  register('run_command', { label: 'Running', category: 'code', formatArgs: a => truncate(a.command, 60) })
-  register('python', { label: 'Python', category: 'code', formatArgs: a => truncate(a.code?.split('\n')[0], 60) })
-  register('python_interpreter', { label: 'Python', category: 'code' })
-  register('web_search', { label: 'Searching', category: 'search', formatArgs: a => a.query })
-  register('preview_skill', { label: 'Deploying skill', category: 'other', formatArgs: a => a.skill_name })
-  register('glob', { label: 'Finding files', category: 'search' })
-  register('find_files', { label: 'Finding files', category: 'search' })
-  register('grep', { label: 'Searching code', category: 'search' })
-  register('search', { label: 'Searching', category: 'search' })
-  register('ls', { label: 'Listing', category: 'file', formatArgs: a => shortenPath(a.path) })
-  register('list_directory', { label: 'Listing', category: 'file', formatArgs: a => shortenPath(a.path) })
-  register('think', { label: 'Thinking...', category: 'other' })
-  register('reasoning', { label: 'Thinking...', category: 'other' })
-  register('write_todos', { label: 'Updating plan', category: 'other' })
-  register('todo_write', { label: 'Updating plan', category: 'other' })
-  register('planner', { label: 'Planning', category: 'other' })
-}
-
-export function register(name: string, config: ToolDisplayConfig) { registry.set(name, config) }
-export function getToolDisplay(name: string, args: Record<string, any>): { label: string; detail: string; category: string } { /* ... */ }
-
-function shortenPath(p?: string): string { /* from toolDisplayUtils.ts */ }
-function truncate(s?: string, max = 60): string { /* ... */ }
-
-registerDefaults()
-```
-
-- [ ] **Step 2: Create ToolCallBadge.tsx**
-
-Compact inline badge showing icon + label + args summary + status indicator. Replaces `ToolCallItem` inside `MessageItem.tsx`.
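-
-A minimal sketch of the badge, assuming the `ToolCall` type from `chat/types` exposes `name`, `args`, and `status` (`'running'` and `'completed'` appear in the reducer; `'error'` is an assumption here, as are the exact prop names):
-
-```typescript
-'use client'
-
-import React from 'react'
-import { Loader2, Check, X } from 'lucide-react'
-import { getToolDisplay } from './toolDisplayRegistry'
-import type { ToolCall } from '@/app/chat/types'
-
-interface ToolCallBadgeProps {
-  toolCall: ToolCall
-  onClick?: (toolCall: ToolCall) => void
-}
-
-export default function ToolCallBadge({ toolCall, onClick }: ToolCallBadgeProps) {
-  const { label, detail } = getToolDisplay(toolCall.name, toolCall.args ?? {})
-  return (
-    <button
-      type="button"
-      onClick={() => onClick?.(toolCall)}
-      className="inline-flex items-center gap-1 rounded border px-2 py-0.5 text-xs"
-    >
-      {/* Status indicator: spinner while running, check or cross when finished */}
-      {toolCall.status === 'running' && <Loader2 size={12} className="animate-spin" />}
-      {toolCall.status === 'completed' && <Check size={12} />}
-      {toolCall.status === 'error' && <X size={12} />}
-      <span className="font-medium">{label}</span>
-      {detail && <span className="text-muted-foreground">{detail}</span>}
-    </button>
-  )
-}
-```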
- -- [ ] **Step 3: Create ToolCallDetail.tsx** - -Generalized from `ToolExecutionPanel.tsx` (375 lines) — shows tool input JSON, output (JSON/Markdown/text), copy buttons, timestamp. - -- [ ] **Step 4: Create barrel export** - -- [ ] **Step 5: Integrate into MessageBubble.tsx** - -Replace the inline `ToolCallItem` in `MessageBubble.tsx` with `ToolCallBadge` from the shared library. - -- [ ] **Step 6: Verify TypeScript compiles and tool display renders correctly** - -- [ ] **Step 7: Commit** - -```bash -git add frontend/app/shared/ToolCallDisplay/ -git commit -m "feat(chat): add shared ToolCallDisplay with human-readable tool labels and badge" -``` - ---- - -### Task 8: Persistent preview panel (layout switch) - -**Files:** -- Modify: `chat/ChatLayout.tsx` (already has ResizablePanel structure from Task 6) -- Delete: `chat/components/ArtifactsDrawer.tsx` -- Delete: `chat/components/CompactArtifactStatus.tsx` -- Delete: `chat/components/ToolExecutionPanel.tsx` -- Delete: `chat/components/CompactToolStatus.tsx` -- Modify: `chat/conversation/ChatInput.tsx` — remove `compactToolStatus` and `compactArtifactStatus` props - -- [ ] **Step 1: Remove overlay rendering from ChatLayout** - -Remove the `renderFloatingPanel` pattern and the `SIDE_PANEL_WIDTH` / `CONTENT_PR` / `CONTENT_MR` constants. The ResizablePanel preview column replaces them. - -- [ ] **Step 2: Remove compact status slot props from ChatInput** - -Remove `compactToolStatus` and `compactArtifactStatus` props from `ChatInput.tsx` — the preview panel is now always visible when content exists, no compact trigger needed. - -- [ ] **Step 3: Delete old overlay components** - -```bash -git rm frontend/app/chat/components/ArtifactsDrawer.tsx -git rm frontend/app/chat/components/CompactArtifactStatus.tsx -git rm frontend/app/chat/components/ToolExecutionPanel.tsx -git rm frontend/app/chat/components/CompactToolStatus.tsx -``` - -- [ ] **Step 4: Add responsive fallback** - -In `ChatLayout.tsx`, add a media query check. For `window.innerWidth < 768`, render PreviewPanel as a slide-over overlay instead of a ResizablePanel column. - -- [ ] **Step 5: Verify app works end-to-end** - -Manual test: send a message that triggers file creation, verify preview panel auto-opens with file tree, close it manually, send another message and verify `userDismissed` suppresses auto-open, verify new conversation resets the flag. - -- [ ] **Step 6: Commit** - -```bash -git add -A -git commit -m "feat(chat): replace overlay panels with persistent ResizablePanel preview column" -``` - ---- - -### Task 9: Per-message ActionBar - -**Files:** -- Create: `shared/ActionBar/ActionBar.tsx` -- Create: `shared/ActionBar/actions/CopyAction.tsx` -- Create: `shared/ActionBar/actions/RetryAction.tsx` -- Create: `shared/ActionBar/index.ts` -- Modify: `chat/conversation/MessageBubble.tsx` - -- [ ] **Step 1: Create ActionBar.tsx** - -```typescript -interface ActionConfig { - id: string - label: string - icon: React.ComponentType<{ size?: number }> - onClick: () => void - show?: boolean -} - -interface ActionBarProps { - actions: ActionConfig[] - visible: boolean // hover state or isLast -} -``` - -Renders a row of icon buttons. Fade in/out via `opacity` + `transition`. - -- [ ] **Step 2: Create CopyAction and RetryAction** - -`CopyAction`: copies message content to clipboard, shows checkmark for 2s. -`RetryAction`: calls `onRetry(messageId)` which re-sends the preceding user message. 
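-
-A sketch of `CopyAction`, assuming the Clipboard API is available and icons come from lucide-react (prop names are illustrative):
-
-```typescript
-'use client'
-
-import React, { useState } from 'react'
-import { Copy, Check } from 'lucide-react'
-
-interface CopyActionProps {
-  content: string
-}
-
-export default function CopyAction({ content }: CopyActionProps) {
-  const [copied, setCopied] = useState(false)
-
-  const handleCopy = async () => {
-    await navigator.clipboard.writeText(content)
-    setCopied(true)
-    // Revert to the copy icon after 2 seconds
-    setTimeout(() => setCopied(false), 2000)
-  }
-
-  return (
-    <button type="button" onClick={handleCopy} aria-label="Copy message">
-      {copied ? <Check size={14} /> : <Copy size={14} />}
-    </button>
-  )
-}
-```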
- -- [ ] **Step 3: Integrate into MessageBubble** - -Add `ActionBar` below assistant message content. Show on hover (via `group-hover` Tailwind class). Always show on the last assistant message. - -- [ ] **Step 4: Create barrel export** - -- [ ] **Step 5: Verify actions work** - -- [ ] **Step 6: Commit** - -```bash -git add frontend/app/shared/ActionBar/ -git commit -m "feat(chat): add per-message ActionBar with Copy and Retry actions" -``` - ---- - -### Task 10: Starter prompts for guided entry - -**Files:** -- Create: `shared/StarterPrompts/StarterPrompts.tsx` -- Create: `shared/StarterPrompts/index.ts` -- Modify: `chat/config/modeConfig.ts` — add `starterPrompts` field -- Modify: `chat/components/ChatHome.tsx` — integrate StarterPrompts - -- [ ] **Step 1: Add starterPrompts to ModeConfig** - -In `modeConfig.ts`, extend the interface: - -```typescript -export interface StarterPrompt { - labelKey: string - promptKey: string - icon: any -} - -export interface ModeConfig { - // ... existing fields - starterPrompts?: StarterPrompt[] -} -``` - -Add prompts to `default-chat`: -```typescript -{ - id: 'default-chat', - // ... existing - starterPrompts: [ - { labelKey: 'chat.starter.analyzeCode', promptKey: 'chat.starter.analyzeCodePrompt', icon: Code }, - { labelKey: 'chat.starter.writeScript', promptKey: 'chat.starter.writeScriptPrompt', icon: Terminal }, - { labelKey: 'chat.starter.explainConcept', promptKey: 'chat.starter.explainConceptPrompt', icon: BookOpen }, - ] -} -``` - -- [ ] **Step 2: Create StarterPrompts.tsx** - -```typescript -interface StarterPromptsProps { - prompts: StarterPrompt[] - onSelect: (prompt: string) => void -} -``` - -Renders a row of clickable chips. On click: calls `onSelect(t(prompt.promptKey))`, adds brief highlight animation. - -- [ ] **Step 3: Integrate into ChatHome** - -Below the mode cards in `ChatHome.tsx`, render `StarterPrompts` for the currently selected mode. On select, fill the input and focus the textarea. - -- [ ] **Step 4: Commit** - -```bash -git add frontend/app/shared/StarterPrompts/ frontend/app/chat/config/modeConfig.ts frontend/app/chat/components/ChatHome.tsx -git commit -m "feat(chat): add starter prompts for guided entry experience" -``` - ---- - -### Task 11: Skill Creator migration to shared components - -**Files:** -- Modify: `skills/creator/components/SkillCreatorChat.tsx` -- Modify: `skills/creator/components/toolDisplayUtils.ts` → deprecate, import from shared registry - -- [ ] **Step 1: Replace toolDisplayUtils with shared registry** - -In `SkillCreatorChat.tsx`, replace: -```typescript -import { formatToolDisplay } from './toolDisplayUtils' -``` -with: -```typescript -import { getToolDisplay } from '@/app/shared/ToolCallDisplay' -``` - -Update all `formatToolDisplay()` calls to use `getToolDisplay()`. - -- [ ] **Step 2: Replace inline tool badges with ToolCallBadge** - -If the Skill Creator renders custom tool badges inline, replace them with `ToolCallBadge` from the shared library. - -- [ ] **Step 3: Verify Skill Creator still works** - -Manual test: navigate to `/skills/creator`, create a skill with AI, verify tool display is correct and preview works. - -- [ ] **Step 4: Delete or deprecate toolDisplayUtils.ts** - -Add a deprecation comment or delete if all imports are migrated. 
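-
-If the file is kept temporarily, the shim can be a bare re-export so stale imports keep compiling (a sketch; assumes the barrel export from Task 7 Step 4):
-
-```typescript
-/**
- * @deprecated Import from '@/app/shared/ToolCallDisplay' instead.
- * This file only re-exports the shared registry and will be removed
- * once all call sites are migrated.
- */
-export { getToolDisplay, register } from '@/app/shared/ToolCallDisplay'
-```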
- -- [ ] **Step 5: Commit** - -```bash -git add frontend/app/skills/creator/ frontend/app/shared/ToolCallDisplay/ -git commit -m "refactor(skills): migrate Skill Creator to shared ToolCallDisplay components" -``` - ---- - -## Final Cleanup - -### Task 12: Remove dead code and update imports - -- [ ] **Step 1: Remove old ThreadContent and MessageItem** - -If no other files import the old components: -```bash -git rm frontend/app/chat/components/ThreadContent.tsx -git rm frontend/app/chat/components/MessageItem.tsx -``` - -Keep `ChatSidebar.tsx` and `ChatHome.tsx` in `components/` — they are still used. - -- [ ] **Step 2: Verify no broken imports** - -Run: `cd frontend && npx tsc --noEmit --pretty 2>&1 | head -50` - -- [ ] **Step 3: Final commit** - -```bash -git add -A -git commit -m "refactor(chat): remove deprecated components and clean up imports" -``` diff --git a/docs/superpowers/plans/2026-03-20-skill-versioning-permissions-api.md b/docs/superpowers/plans/2026-03-20-skill-versioning-permissions-api.md deleted file mode 100644 index 64c17070f..000000000 --- a/docs/superpowers/plans/2026-03-20-skill-versioning-permissions-api.md +++ /dev/null @@ -1,2942 +0,0 @@ -# Skill Versioning, Permissions & Token API Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Add semantic versioning, team collaboration permissions, and PlatformToken auth to the Skill system. - -**Architecture:** 4 new DB tables (skill_versions, skill_version_files, skill_collaborators, platform_tokens) with zero changes to existing tables. New repos/services/routes follow existing layered pattern (Repository → Service → API). A unified `check_skill_access()` replaces hardcoded `owner_id` checks. Dual-mode auth dependency supports session + `sk_` PlatformToken. - -**Tech Stack:** FastAPI, SQLAlchemy async (PostgreSQL), Alembic, Pydantic v2, `semver` PyPI package, `secrets` stdlib. 
- -**Spec:** `docs/superpowers/specs/2026-03-20-skill-versioning-permissions-api-design.md` - ---- - -## File Structure - -### New Files -| File | Responsibility | -|------|---------------| -| `backend/app/models/skill_version.py` | SkillVersion + SkillVersionFile ORM models | -| `backend/app/models/skill_collaborator.py` | CollaboratorRole enum + SkillCollaborator ORM model | -| `backend/app/models/platform_token.py` | PlatformToken ORM model | -| `backend/app/schemas/skill_version.py` | Pydantic schemas for version CRUD | -| `backend/app/schemas/skill_collaborator.py` | Pydantic schemas for collaborator CRUD | -| `backend/app/schemas/platform_token.py` | Pydantic schemas for token CRUD | -| `backend/app/repositories/skill_version.py` | SkillVersion + SkillVersionFile repository | -| `backend/app/repositories/skill_collaborator.py` | SkillCollaborator repository | -| `backend/app/repositories/platform_token.py` | PlatformToken repository | -| `backend/app/services/skill_version_service.py` | Version publish/list/restore logic | -| `backend/app/services/skill_collaborator_service.py` | Collaborator CRUD + transfer | -| `backend/app/services/platform_token_service.py` | Token create/list/revoke | -| `backend/app/common/skill_permissions.py` | `check_skill_access()` unified permission check | -| `backend/app/common/auth_dependency.py` | `get_current_user_or_token()` dual-mode auth | -| `backend/app/api/v1/skill_versions.py` | Version API routes | -| `backend/app/api/v1/skill_collaborators.py` | Collaborator API routes | -| `backend/app/api/v1/tokens.py` | Token API routes | -| `backend/alembic/versions/20260321_000011_add_skill_versioning_permissions_tokens.py` | Migration | -| `backend/tests/test_api/test_skill_versions.py` | Version API tests | -| `backend/tests/test_api/test_skill_collaborators.py` | Collaborator API tests | -| `backend/tests/test_api/test_platform_tokens.py` | Token API tests | -| `backend/tests/test_services/test_skill_permissions.py` | Permission utility tests | - -### Modified Files -| File | Change | -|------|--------| -| `backend/app/models/__init__.py` | Register 4 new models + CollaboratorRole enum | -| `backend/app/api/v1/__init__.py` | Register 3 new routers | -| `backend/app/repositories/skill.py` | Extend `list_by_user()` with collaborator subquery | -| `backend/app/services/skill_service.py` | Replace `owner_id` checks with `check_skill_access()` | -| `backend/app/schemas/skill.py` | Add `latest_version` field to SkillSchema | - ---- - -### Task 1: SkillCollaborator Model + CollaboratorRole Enum - -**Files:** -- Create: `backend/app/models/skill_collaborator.py` -- Test: `backend/tests/test_services/test_skill_permissions.py` (placeholder) - -- [ ] **Step 1: Create the SkillCollaborator model file** - -```python -# backend/app/models/skill_collaborator.py -"""Skill Collaborator model — per-skill role-based access control.""" - -from __future__ import annotations - -import enum -from typing import TYPE_CHECKING - -from sqlalchemy import Enum, ForeignKey, Index, String, UniqueConstraint -from sqlalchemy.dialects.postgresql import UUID -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from .base import BaseModel - -if TYPE_CHECKING: - from .auth import AuthUser - from .skill import Skill - - -class CollaboratorRole(str, enum.Enum): - """Roles ordered by privilege: viewer < editor < publisher < admin.""" - viewer = "viewer" - editor = "editor" - publisher = "publisher" - admin = "admin" - - @classmethod - def rank(cls, role: "CollaboratorRole") 
-> int: - _order = [cls.viewer, cls.editor, cls.publisher, cls.admin] - return _order.index(role) - - def __ge__(self, other): - if not isinstance(other, CollaboratorRole): - return NotImplemented - return CollaboratorRole.rank(self) >= CollaboratorRole.rank(other) - - def __gt__(self, other): - if not isinstance(other, CollaboratorRole): - return NotImplemented - return CollaboratorRole.rank(self) > CollaboratorRole.rank(other) - - def __le__(self, other): - if not isinstance(other, CollaboratorRole): - return NotImplemented - return CollaboratorRole.rank(self) <= CollaboratorRole.rank(other) - - def __lt__(self, other): - if not isinstance(other, CollaboratorRole): - return NotImplemented - return CollaboratorRole.rank(self) < CollaboratorRole.rank(other) - - -class SkillCollaborator(BaseModel): - """Per-skill collaborator with role.""" - - __tablename__ = "skill_collaborators" - - skill_id: Mapped["uuid.UUID"] = mapped_column( - UUID(as_uuid=True), - ForeignKey("skills.id", ondelete="CASCADE"), - nullable=False, - ) - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - role: Mapped[CollaboratorRole] = mapped_column( - Enum(CollaboratorRole, name="collaborator_role", create_constraint=True), - nullable=False, - ) - invited_by: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - - # Relationships - skill: Mapped["Skill"] = relationship("Skill", lazy="selectin") - user: Mapped["AuthUser"] = relationship("AuthUser", foreign_keys=[user_id], lazy="selectin") - inviter: Mapped["AuthUser"] = relationship("AuthUser", foreign_keys=[invited_by], lazy="selectin") - - __table_args__ = ( - UniqueConstraint("skill_id", "user_id", name="skill_collaborators_skill_user_unique"), - Index("skill_collaborators_user_skill_idx", "user_id", "skill_id"), - ) -``` - -Add the missing `import uuid` at the top (after `import enum`): -```python -import uuid -``` - -- [ ] **Step 2: Verify the file is syntactically correct** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.models.skill_collaborator import SkillCollaborator, CollaboratorRole; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/models/skill_collaborator.py -git commit -m "feat(models): add SkillCollaborator model and CollaboratorRole enum" -``` - ---- - -### Task 2: SkillVersion + SkillVersionFile Models - -**Files:** -- Create: `backend/app/models/skill_version.py` - -- [ ] **Step 1: Create the SkillVersion model file** - -```python -# backend/app/models/skill_version.py -"""Immutable skill version snapshots.""" - -from __future__ import annotations - -import uuid -from typing import TYPE_CHECKING, List, Optional - -from sqlalchemy import DateTime, ForeignKey, Index, Integer, String, Text, UniqueConstraint -from sqlalchemy.dialects.postgresql import JSONB, UUID -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from .base import BaseModel - -if TYPE_CHECKING: - from .auth import AuthUser - from .skill import Skill - - -class SkillVersion(BaseModel): - """Published immutable version snapshot of a Skill.""" - - __tablename__ = "skill_versions" - - skill_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("skills.id", ondelete="CASCADE"), - nullable=False, - ) - version: Mapped[str] = mapped_column(String(20), nullable=False) - release_notes: Mapped[Optional[str]] = mapped_column(Text, nullable=True) - - # 
Snapshot fields - skill_name: Mapped[str] = mapped_column(String(64), nullable=False) - skill_description: Mapped[str] = mapped_column(String(1024), nullable=False) - content: Mapped[str] = mapped_column(Text, nullable=False) - tags: Mapped[list] = mapped_column(JSONB, nullable=False, default=list) - meta_data: Mapped[dict] = mapped_column("metadata", JSONB, nullable=False, default=dict) - allowed_tools: Mapped[list] = mapped_column(JSONB, nullable=False, default=list) - compatibility: Mapped[Optional[str]] = mapped_column(String(500), nullable=True) - license: Mapped[Optional[str]] = mapped_column(String(100), nullable=True) - - published_by_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - published_at: Mapped["datetime"] = mapped_column( - DateTime(timezone=True), - nullable=False, - ) - - # Relationships - skill: Mapped["Skill"] = relationship("Skill", lazy="selectin") - published_by: Mapped["AuthUser"] = relationship("AuthUser", lazy="selectin") - files: Mapped[List["SkillVersionFile"]] = relationship( - "SkillVersionFile", - back_populates="version", - cascade="all, delete-orphan", - lazy="selectin", - ) - - __table_args__ = ( - UniqueConstraint("skill_id", "version", name="skill_versions_skill_version_unique"), - Index("skill_versions_skill_idx", "skill_id"), - Index("skill_versions_published_at_idx", "published_at"), - ) - - -class SkillVersionFile(BaseModel): - """File snapshot belonging to a published version.""" - - __tablename__ = "skill_version_files" - - version_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("skill_versions.id", ondelete="CASCADE"), - nullable=False, - ) - path: Mapped[str] = mapped_column(String(512), nullable=False) - file_name: Mapped[str] = mapped_column(String(255), nullable=False) - file_type: Mapped[str] = mapped_column(String(50), nullable=False) - content: Mapped[Optional[str]] = mapped_column(Text, nullable=True) - storage_type: Mapped[str] = mapped_column(String(20), nullable=False, default="database") - storage_key: Mapped[Optional[str]] = mapped_column(String(512), nullable=True) - size: Mapped[int] = mapped_column(Integer, nullable=False, default=0) - - # Relationship - version: Mapped["SkillVersion"] = relationship("SkillVersion", back_populates="files", lazy="selectin") - - __table_args__ = ( - Index("skill_version_files_version_idx", "version_id"), - ) -``` - -Add `from datetime import datetime` at top (after `import uuid`). 
- -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.models.skill_version import SkillVersion, SkillVersionFile; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/models/skill_version.py -git commit -m "feat(models): add SkillVersion and SkillVersionFile models" -``` - ---- - -### Task 3: PlatformToken Model - -**Files:** -- Create: `backend/app/models/platform_token.py` - -- [ ] **Step 1: Create the PlatformToken model file** - -```python -# backend/app/models/platform_token.py -"""Universal PlatformToken for API authentication.""" - -from __future__ import annotations - -import uuid -from datetime import datetime -from typing import TYPE_CHECKING, Optional - -from sqlalchemy import Boolean, DateTime, ForeignKey, Index, String -from sqlalchemy.dialects.postgresql import JSONB, UUID -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from .base import BaseModel - -if TYPE_CHECKING: - from .auth import AuthUser - - -class PlatformToken(BaseModel): - """API token with scoped permissions.""" - - __tablename__ = "platform_tokens" - - user_id: Mapped[str] = mapped_column( - String(255), - ForeignKey("user.id", ondelete="CASCADE"), - nullable=False, - ) - name: Mapped[str] = mapped_column(String(255), nullable=False) - token_hash: Mapped[str] = mapped_column(String(64), nullable=False, unique=True) - token_prefix: Mapped[str] = mapped_column(String(12), nullable=False) - scopes: Mapped[list] = mapped_column(JSONB, nullable=False, default=list) - resource_type: Mapped[Optional[str]] = mapped_column(String(50), nullable=True) - resource_id: Mapped[Optional[uuid.UUID]] = mapped_column(UUID(as_uuid=True), nullable=True) - expires_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) - last_used_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) - is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) - - # Relationship - user: Mapped["AuthUser"] = relationship("AuthUser", lazy="selectin") - - __table_args__ = ( - Index("platform_tokens_user_idx", "user_id"), - Index("platform_tokens_hash_idx", "token_hash"), - Index("platform_tokens_active_idx", "is_active"), - ) -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.models.platform_token import PlatformToken; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/models/platform_token.py -git commit -m "feat(models): add PlatformToken model" -``` - ---- - -### Task 4: Register Models in `__init__.py` - -**Files:** -- Modify: `backend/app/models/__init__.py` - -- [ ] **Step 1: Add imports and __all__ entries** - -Add these imports after `from .skill import Skill, SkillFile`: -```python -from .skill_collaborator import CollaboratorRole, SkillCollaborator -from .skill_version import SkillVersion, SkillVersionFile -from .platform_token import PlatformToken -``` - -Add to `__all__`: -```python - "CollaboratorRole", - "SkillCollaborator", - "SkillVersion", - "SkillVersionFile", - "PlatformToken", -``` - -- [ ] **Step 2: Verify import** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.models import SkillVersion, SkillVersionFile, SkillCollaborator, CollaboratorRole, PlatformToken; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add 
backend/app/models/__init__.py -git commit -m "feat(models): register new models in __init__.py" -``` - ---- - -### Task 5: Alembic Migration - -**Files:** -- Create: `backend/alembic/versions/20260321_000011_add_skill_versioning_permissions_tokens.py` - -- [ ] **Step 1: Generate migration** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && alembic revision --autogenerate -m "add_skill_versioning_permissions_tokens"` - -If autogenerate fails, create manually. The migration must create these 4 tables: -- `skill_collaborators` (with `collaborator_role` enum type) -- `skill_versions` -- `skill_version_files` -- `platform_tokens` - -- [ ] **Step 2: Review the generated migration** - -Open the generated file and verify: -1. `collaborator_role` enum is created before table -2. All columns, FKs, indexes, unique constraints match the spec -3. `downgrade()` drops tables in correct order (version_files before versions) and drops the enum - -- [ ] **Step 3: Test migration runs** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && alembic upgrade head` -Expected: No errors, 4 tables created. - -- [ ] **Step 4: Commit** - -```bash -git add backend/alembic/versions/ -git commit -m "feat(migration): add skill_versions, skill_collaborators, platform_tokens tables" -``` - ---- - -### Task 6: Skill Permission Utility — `check_skill_access()` - -**Files:** -- Create: `backend/app/common/skill_permissions.py` -- Test: `backend/tests/test_services/test_skill_permissions.py` - -- [ ] **Step 1: Write failing tests** - -```python -# backend/tests/test_services/test_skill_permissions.py -"""Tests for check_skill_access unified permission check.""" - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - -from app.common.exceptions import ForbiddenException -from app.common.skill_permissions import check_skill_access -from app.models.skill_collaborator import CollaboratorRole - - -def _make_skill(owner_id="owner-1", is_public=False): - skill = MagicMock() - skill.id = uuid.uuid4() - skill.owner_id = owner_id - skill.is_public = is_public - return skill - - -def _make_user(user_id="user-1", is_superuser=False): - user = MagicMock() - user.id = user_id - user.is_superuser = is_superuser - return user - - -@pytest.mark.asyncio -async def test_superuser_always_passes(): - skill = _make_skill(owner_id="other") - db = AsyncMock() - await check_skill_access(db, skill, "super-1", CollaboratorRole.admin, is_superuser=True) - # Should not raise - - -@pytest.mark.asyncio -async def test_owner_always_passes(): - skill = _make_skill(owner_id="owner-1") - db = AsyncMock() - await check_skill_access(db, skill, "owner-1", CollaboratorRole.admin) - # Should not raise - - -@pytest.mark.asyncio -async def test_collaborator_with_sufficient_role(): - skill = _make_skill(owner_id="other") - db = AsyncMock() - mock_collab = MagicMock() - mock_collab.role = CollaboratorRole.editor - with patch("app.common.skill_permissions._get_collaborator", return_value=mock_collab): - await check_skill_access(db, skill, "user-1", CollaboratorRole.editor) - - -@pytest.mark.asyncio -async def test_collaborator_with_insufficient_role(): - skill = _make_skill(owner_id="other") - db = AsyncMock() - mock_collab = MagicMock() - mock_collab.role = CollaboratorRole.viewer - with patch("app.common.skill_permissions._get_collaborator", return_value=mock_collab): - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "user-1", CollaboratorRole.editor) - - 
-@pytest.mark.asyncio -async def test_public_skill_viewer_access(): - skill = _make_skill(owner_id="other", is_public=True) - db = AsyncMock() - with patch("app.common.skill_permissions._get_collaborator", return_value=None): - await check_skill_access(db, skill, "user-1", CollaboratorRole.viewer) - - -@pytest.mark.asyncio -async def test_public_skill_editor_access_denied(): - skill = _make_skill(owner_id="other", is_public=True) - db = AsyncMock() - with patch("app.common.skill_permissions._get_collaborator", return_value=None): - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "user-1", CollaboratorRole.editor) - - -@pytest.mark.asyncio -async def test_token_scope_check_passes(): - skill = _make_skill(owner_id="owner-1") - db = AsyncMock() - await check_skill_access( - db, skill, "owner-1", CollaboratorRole.viewer, - token_scopes=["skills:read"], required_scope="skills:read", - ) - - -@pytest.mark.asyncio -async def test_token_scope_check_fails(): - skill = _make_skill(owner_id="owner-1") - db = AsyncMock() - with pytest.raises(ForbiddenException): - await check_skill_access( - db, skill, "owner-1", CollaboratorRole.viewer, - token_scopes=["skills:read"], required_scope="skills:write", - ) -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -m pytest tests/test_services/test_skill_permissions.py -v` -Expected: FAIL — `ModuleNotFoundError: No module named 'app.common.skill_permissions'` - -- [ ] **Step 3: Implement `check_skill_access()`** - -```python -# backend/app/common/skill_permissions.py -"""Unified skill permission check — replaces hardcoded owner_id comparisons.""" - -from __future__ import annotations - -from typing import List, Optional - -from sqlalchemy import and_, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.exceptions import ForbiddenException -from app.models.skill import Skill -from app.models.skill_collaborator import CollaboratorRole, SkillCollaborator - - -async def _get_collaborator( - db: AsyncSession, - skill_id, - user_id: str, -) -> Optional[SkillCollaborator]: - result = await db.execute( - select(SkillCollaborator).where( - and_( - SkillCollaborator.skill_id == skill_id, - SkillCollaborator.user_id == user_id, - ) - ) - ) - return result.scalar_one_or_none() - - -async def check_skill_access( - db: AsyncSession, - skill: Skill, - user_id: str, - min_role: CollaboratorRole, - *, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - required_scope: Optional[str] = None, -) -> None: - """ - Unified permission check. - - Raises ForbiddenException if the user lacks sufficient access. - """ - # 1. Superuser bypass - if is_superuser: - _check_token_scope(token_scopes, required_scope) - return - - # 2. Owner always passes - if skill.owner_id and skill.owner_id == user_id: - _check_token_scope(token_scopes, required_scope) - return - - # 3. Check collaborator role - collab = await _get_collaborator(db, skill.id, user_id) - if collab and collab.role >= min_role: - _check_token_scope(token_scopes, required_scope) - return - - # 4. 
Public skill + viewer access - if skill.is_public and min_role == CollaboratorRole.viewer: - _check_token_scope(token_scopes, required_scope) - return - - raise ForbiddenException("You don't have permission to access this skill") - - -def _check_token_scope( - token_scopes: Optional[List[str]], - required_scope: Optional[str], -) -> None: - """If request came via PlatformToken, verify scope.""" - if token_scopes is not None and required_scope is not None: - if required_scope not in token_scopes: - raise ForbiddenException(f"Token missing required scope: {required_scope}") -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -m pytest tests/test_services/test_skill_permissions.py -v` -Expected: All 8 tests PASS - -- [ ] **Step 5: Commit** - -```bash -git add backend/app/common/skill_permissions.py backend/tests/test_services/test_skill_permissions.py -git commit -m "feat(permissions): add check_skill_access() unified permission utility" -``` - ---- - -### Task 7: Dual-Mode Auth Dependency — `get_current_user_or_token()` - -**Files:** -- Create: `backend/app/common/auth_dependency.py` - -- [ ] **Step 1: Implement the dual-mode auth dependency** - -```python -# backend/app/common/auth_dependency.py -"""Dual-mode authentication: session/JWT + PlatformToken (sk_ prefix).""" - -from __future__ import annotations - -import hashlib -from dataclasses import dataclass -from datetime import datetime, timezone -from typing import List, Optional - -from fastapi import Depends, Request -from fastapi.security import OAuth2PasswordBearer -from sqlalchemy import select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.common.dependencies import get_current_user -from app.common.exceptions import UnauthorizedException -from app.core.database import get_db -from app.models.auth import AuthUser as User -from app.models.platform_token import PlatformToken - -oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login", auto_error=False) - -# Debounce interval for last_used_at updates (5 minutes) -_LAST_USED_DEBOUNCE_SECONDS = 300 - - -@dataclass -class AuthContext: - """Result of authentication — carries user + optional token scopes.""" - user: User - token_scopes: Optional[List[str]] = None - - @property - def is_token_auth(self) -> bool: - return self.token_scopes is not None - - -async def get_current_user_or_token( - token: Optional[str] = Depends(oauth2_scheme), - request: Request = None, - db: AsyncSession = Depends(get_db), -) -> AuthContext: - """ - Authenticate via session/JWT or PlatformToken. - - If Bearer token starts with 'sk_', route to PlatformToken path. - Otherwise, fall through to existing session/JWT auth. 
- """ - # Try to extract token from cookie if not in header - raw_token = token - if not raw_token and request: - from app.core.settings import settings - raw_token = ( - request.cookies.get(settings.cookie_name) - or request.cookies.get("session-token") - or request.cookies.get("session_token") - or request.cookies.get("access_token") - or request.cookies.get("Authorization") - or request.cookies.get("auth_token") - ) - - # PlatformToken path - if raw_token and raw_token.startswith("sk_"): - return await _authenticate_platform_token(raw_token, db) - - # Fall through to existing session/JWT auth - user = await get_current_user(token=token, request=request, db=db) - return AuthContext(user=user, token_scopes=None) - - -async def _authenticate_platform_token( - raw_token: str, - db: AsyncSession, -) -> AuthContext: - """Verify a PlatformToken and return AuthContext with scopes.""" - token_hash = hashlib.sha256(raw_token.encode()).hexdigest() - - result = await db.execute( - select(PlatformToken).where(PlatformToken.token_hash == token_hash) - ) - pt = result.scalar_one_or_none() - - if not pt: - raise UnauthorizedException("Invalid API token") - - if not pt.is_active: - raise UnauthorizedException("API token has been revoked") - - if pt.expires_at and pt.expires_at < datetime.now(timezone.utc): - raise UnauthorizedException("API token has expired") - - # Debounce last_used_at update - now = datetime.now(timezone.utc) - if not pt.last_used_at or (now - pt.last_used_at).total_seconds() > _LAST_USED_DEBOUNCE_SECONDS: - pt.last_used_at = now - await db.commit() - - # Load the user - user_result = await db.execute(select(User).where(User.id == pt.user_id)) - user = user_result.scalar_one_or_none() - if not user or not user.is_active: - raise UnauthorizedException("Token owner account is inactive") - - return AuthContext(user=user, token_scopes=list(pt.scopes)) -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.common.auth_dependency import get_current_user_or_token, AuthContext; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/common/auth_dependency.py -git commit -m "feat(auth): add get_current_user_or_token() dual-mode auth dependency" -``` - ---- - -### Task 8: Repositories — SkillCollaborator - -**Files:** -- Create: `backend/app/repositories/skill_collaborator.py` - -- [ ] **Step 1: Create SkillCollaborator repository** - -```python -# backend/app/repositories/skill_collaborator.py -"""Skill Collaborator Repository.""" - -from __future__ import annotations - -import uuid -from typing import List, Optional - -from sqlalchemy import and_, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.models.skill_collaborator import CollaboratorRole, SkillCollaborator - -from .base import BaseRepository - - -class SkillCollaboratorRepository(BaseRepository[SkillCollaborator]): - def __init__(self, db: AsyncSession): - super().__init__(SkillCollaborator, db) - - async def get_by_skill_and_user( - self, skill_id: uuid.UUID, user_id: str - ) -> Optional[SkillCollaborator]: - result = await self.db.execute( - select(SkillCollaborator).where( - and_( - SkillCollaborator.skill_id == skill_id, - SkillCollaborator.user_id == user_id, - ) - ) - ) - return result.scalar_one_or_none() - - async def list_by_skill(self, skill_id: uuid.UUID) -> List[SkillCollaborator]: - result = await self.db.execute( - select(SkillCollaborator).where(SkillCollaborator.skill_id == skill_id) - ) - 
return list(result.scalars().all()) - - async def list_skill_ids_for_user(self, user_id: str) -> List[uuid.UUID]: - """Return skill IDs where user is a collaborator (used by list_by_user).""" - result = await self.db.execute( - select(SkillCollaborator.skill_id).where( - SkillCollaborator.user_id == user_id - ) - ) - return [row[0] for row in result.all()] - - async def delete_by_skill_and_user( - self, skill_id: uuid.UUID, user_id: str - ) -> bool: - collab = await self.get_by_skill_and_user(skill_id, user_id) - if not collab: - return False - await self.db.delete(collab) - await self.db.flush() - return True -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.repositories.skill_collaborator import SkillCollaboratorRepository; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/repositories/skill_collaborator.py -git commit -m "feat(repos): add SkillCollaboratorRepository" -``` - ---- - -### Task 9: Repositories — SkillVersion + SkillVersionFile - -**Files:** -- Create: `backend/app/repositories/skill_version.py` - -- [ ] **Step 1: Create SkillVersion repository** - -```python -# backend/app/repositories/skill_version.py -"""Skill Version Repository.""" - -from __future__ import annotations - -import uuid -from typing import List, Optional - -from sqlalchemy import and_, select -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import selectinload - -from app.models.skill_version import SkillVersion, SkillVersionFile - -from .base import BaseRepository - - -class SkillVersionRepository(BaseRepository[SkillVersion]): - def __init__(self, db: AsyncSession): - super().__init__(SkillVersion, db) - - async def list_by_skill(self, skill_id: uuid.UUID) -> List[SkillVersion]: - result = await self.db.execute( - select(SkillVersion) - .where(SkillVersion.skill_id == skill_id) - .options(selectinload(SkillVersion.files)) - .order_by(SkillVersion.published_at.desc()) - ) - return list(result.scalars().all()) - - async def get_latest(self, skill_id: uuid.UUID) -> Optional[SkillVersion]: - result = await self.db.execute( - select(SkillVersion) - .where(SkillVersion.skill_id == skill_id) - .options(selectinload(SkillVersion.files)) - .order_by(SkillVersion.published_at.desc()) - .limit(1) - ) - return result.scalar_one_or_none() - - async def get_by_version( - self, skill_id: uuid.UUID, version: str - ) -> Optional[SkillVersion]: - result = await self.db.execute( - select(SkillVersion) - .where( - and_( - SkillVersion.skill_id == skill_id, - SkillVersion.version == version, - ) - ) - .options(selectinload(SkillVersion.files)) - ) - return result.scalar_one_or_none() - - async def get_highest_version_str(self, skill_id: uuid.UUID) -> Optional[str]: - """Return the highest semver version string for a skill.""" - versions = await self.list_by_skill(skill_id) - if not versions: - return None - import semver - parsed = [(v, semver.Version.parse(v.version)) for v in versions] - parsed.sort(key=lambda x: x[1], reverse=True) - return parsed[0][0].version - - -class SkillVersionFileRepository(BaseRepository[SkillVersionFile]): - def __init__(self, db: AsyncSession): - super().__init__(SkillVersionFile, db) - - async def list_by_version(self, version_id: uuid.UUID) -> List[SkillVersionFile]: - result = await self.db.execute( - select(SkillVersionFile).where( - SkillVersionFile.version_id == version_id - ) - ) - return list(result.scalars().all()) -``` - -- [ ] **Step 2: 
Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.repositories.skill_version import SkillVersionRepository, SkillVersionFileRepository; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/repositories/skill_version.py -git commit -m "feat(repos): add SkillVersionRepository and SkillVersionFileRepository" -``` - ---- - -### Task 10: Repository — PlatformToken - -**Files:** -- Create: `backend/app/repositories/platform_token.py` - -- [ ] **Step 1: Create PlatformToken repository** - -```python -# backend/app/repositories/platform_token.py -"""PlatformToken Repository.""" - -from __future__ import annotations - -from typing import List, Optional - -from sqlalchemy import and_, func, select -from sqlalchemy.ext.asyncio import AsyncSession - -from app.models.platform_token import PlatformToken - -from .base import BaseRepository - - -class PlatformTokenRepository(BaseRepository[PlatformToken]): - def __init__(self, db: AsyncSession): - super().__init__(PlatformToken, db) - - async def get_by_hash(self, token_hash: str) -> Optional[PlatformToken]: - result = await self.db.execute( - select(PlatformToken).where(PlatformToken.token_hash == token_hash) - ) - return result.scalar_one_or_none() - - async def list_by_user(self, user_id: str) -> List[PlatformToken]: - result = await self.db.execute( - select(PlatformToken) - .where(PlatformToken.user_id == user_id) - .order_by(PlatformToken.created_at.desc()) - ) - return list(result.scalars().all()) - - async def count_active_by_user(self, user_id: str) -> int: - result = await self.db.execute( - select(func.count()).select_from(PlatformToken).where( - and_( - PlatformToken.user_id == user_id, - PlatformToken.is_active.is_(True), - ) - ) - ) - return result.scalar() or 0 -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.repositories.platform_token import PlatformTokenRepository; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/repositories/platform_token.py -git commit -m "feat(repos): add PlatformTokenRepository" -``` - ---- - -### Task 11: Extend `SkillRepository.list_by_user()` for Collaborator Access - -**Files:** -- Modify: `backend/app/repositories/skill.py:23-66` - -- [ ] **Step 1: Add collaborator subquery to list_by_user** - -In `backend/app/repositories/skill.py`, add import at top: -```python -from app.models.skill_collaborator import SkillCollaborator -``` - -Modify the `list_by_user` method — in the `if user_id:` block where `include_public` is True, change: -```python -# OLD -conditions.append( - or_( - Skill.owner_id == user_id, - Skill.is_public.is_(True), - Skill.owner_id.is_(None), - ) -) -``` -to: -```python -# NEW — include skills where user is a collaborator -collab_subquery = select(SkillCollaborator.skill_id).where( - SkillCollaborator.user_id == user_id -).scalar_subquery() -conditions.append( - or_( - Skill.owner_id == user_id, - Skill.id.in_(collab_subquery), - Skill.is_public.is_(True), - Skill.owner_id.is_(None), - ) -) -``` - -And the `include_public=False` branch: -```python -# OLD -conditions.append(Skill.owner_id == user_id) -``` -to: -```python -# NEW -collab_subquery = select(SkillCollaborator.skill_id).where( - SkillCollaborator.user_id == user_id -).scalar_subquery() -conditions.append( - or_( - Skill.owner_id == user_id, - Skill.id.in_(collab_subquery), - ) -) -``` - -- [ ] **Step 2: Verify 
syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.repositories.skill import SkillRepository; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/repositories/skill.py -git commit -m "feat(repos): extend list_by_user to include collaborator skills" -``` - ---- - -### Task 12: Pydantic Schemas — SkillCollaborator - -**Files:** -- Create: `backend/app/schemas/skill_collaborator.py` - -- [ ] **Step 1: Create schemas** - -```python -# backend/app/schemas/skill_collaborator.py -"""Pydantic schemas for Skill Collaborator API.""" - -import uuid -from datetime import datetime -from typing import Optional - -from pydantic import BaseModel, Field, field_validator - -from app.models.skill_collaborator import CollaboratorRole - - -class CollaboratorCreate(BaseModel): - user_id: str = Field(..., description="User ID to add as collaborator") - role: CollaboratorRole = Field(..., description="Role to assign") - - -class CollaboratorUpdate(BaseModel): - role: CollaboratorRole = Field(..., description="New role") - - -class CollaboratorSchema(BaseModel): - id: str - skill_id: str - user_id: str - role: CollaboratorRole - invited_by: str - created_at: Optional[str] = None - - @field_validator("id", "skill_id", mode="before") - @classmethod - def convert_uuid_to_str(cls, v): - if isinstance(v, uuid.UUID): - return str(v) - return v - - @field_validator("created_at", mode="before") - @classmethod - def convert_datetime_to_str(cls, v): - if isinstance(v, datetime): - return v.isoformat() - return v - - class Config: - from_attributes = True - - -class TransferOwnershipRequest(BaseModel): - new_owner_id: str = Field(..., description="User ID of new owner") -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.schemas.skill_collaborator import CollaboratorCreate, CollaboratorSchema, TransferOwnershipRequest; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/schemas/skill_collaborator.py -git commit -m "feat(schemas): add skill collaborator Pydantic schemas" -``` - ---- - -### Task 13: Pydantic Schemas — SkillVersion - -**Files:** -- Create: `backend/app/schemas/skill_version.py` - -- [ ] **Step 1: Create schemas** - -```python -# backend/app/schemas/skill_version.py -"""Pydantic schemas for Skill Version API.""" - -import uuid -from datetime import datetime -from typing import List, Optional - -from pydantic import BaseModel, Field, field_validator - - -class VersionPublishRequest(BaseModel): - version: str = Field(..., description="Semver MAJOR.MINOR.PATCH", max_length=20) - release_notes: Optional[str] = Field(None, description="Changelog / release notes") - - -class VersionRestoreRequest(BaseModel): - version: str = Field(..., description="Version to restore draft from") - - -class VersionFileSchema(BaseModel): - id: str - version_id: str - path: str - file_name: str - file_type: str - content: Optional[str] = None - storage_type: str = "database" - storage_key: Optional[str] = None - size: int = 0 - - @field_validator("id", "version_id", mode="before") - @classmethod - def convert_uuid_to_str(cls, v): - if isinstance(v, uuid.UUID): - return str(v) - return v - - class Config: - from_attributes = True - - -class VersionSchema(BaseModel): - id: str - skill_id: str - version: str - release_notes: Optional[str] = None - skill_name: str - skill_description: str - content: str - tags: List[str] = 
Field(default_factory=list) - metadata: dict = Field(default_factory=dict, validation_alias="meta_data") - allowed_tools: List[str] = Field(default_factory=list) - compatibility: Optional[str] = None - license: Optional[str] = None - published_by_id: str - published_at: Optional[str] = None - created_at: Optional[str] = None - files: Optional[List[VersionFileSchema]] = None - - @field_validator("id", "skill_id", mode="before") - @classmethod - def convert_uuid_to_str(cls, v): - if isinstance(v, uuid.UUID): - return str(v) - return v - - @field_validator("published_at", "created_at", mode="before") - @classmethod - def convert_datetime_to_str(cls, v): - if isinstance(v, datetime): - return v.isoformat() - return v - - class Config: - from_attributes = True - populate_by_name = True - - -class VersionSummarySchema(BaseModel): - """Lightweight version info for embedding in SkillSchema.""" - version: str - release_notes: Optional[str] = None - published_at: Optional[str] = None - - @field_validator("published_at", mode="before") - @classmethod - def convert_datetime_to_str(cls, v): - if isinstance(v, datetime): - return v.isoformat() - return v - - class Config: - from_attributes = True -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.schemas.skill_version import VersionPublishRequest, VersionSchema, VersionSummarySchema; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/schemas/skill_version.py -git commit -m "feat(schemas): add skill version Pydantic schemas" -``` - ---- - -### Task 14: Pydantic Schemas — PlatformToken - -**Files:** -- Create: `backend/app/schemas/platform_token.py` - -- [ ] **Step 1: Create schemas** - -```python -# backend/app/schemas/platform_token.py -"""Pydantic schemas for PlatformToken API.""" - -import uuid -from datetime import datetime -from typing import List, Optional - -from pydantic import BaseModel, Field, field_validator - - -class TokenCreate(BaseModel): - name: str = Field(..., max_length=255) - scopes: List[str] = Field(..., description="e.g. 
['skills:read', 'skills:write']") - resource_type: Optional[str] = Field(None, max_length=50) - resource_id: Optional[str] = None - expires_at: Optional[datetime] = None - - -class TokenCreateResponse(BaseModel): - """Returned only once at creation — contains plaintext token.""" - id: str - name: str - token: str # plaintext, shown only once - token_prefix: str - scopes: List[str] - expires_at: Optional[str] = None - - @field_validator("id", mode="before") - @classmethod - def convert_uuid_to_str(cls, v): - if isinstance(v, uuid.UUID): - return str(v) - return v - - @field_validator("expires_at", mode="before") - @classmethod - def convert_datetime_to_str(cls, v): - if isinstance(v, datetime): - return v.isoformat() - return v - - -class TokenSchema(BaseModel): - """List view — never contains plaintext token.""" - id: str - name: str - token_prefix: str - scopes: List[str] - resource_type: Optional[str] = None - resource_id: Optional[str] = None - expires_at: Optional[str] = None - last_used_at: Optional[str] = None - is_active: bool - created_at: Optional[str] = None - - @field_validator("id", mode="before") - @classmethod - def convert_uuid_to_str(cls, v): - if isinstance(v, uuid.UUID): - return str(v) - return v - - @field_validator("resource_id", mode="before") - @classmethod - def convert_resource_id(cls, v): - if isinstance(v, uuid.UUID): - return str(v) - return v - - @field_validator("expires_at", "last_used_at", "created_at", mode="before") - @classmethod - def convert_datetime_to_str(cls, v): - if isinstance(v, datetime): - return v.isoformat() - return v - - class Config: - from_attributes = True -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.schemas.platform_token import TokenCreate, TokenCreateResponse, TokenSchema; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/schemas/platform_token.py -git commit -m "feat(schemas): add PlatformToken Pydantic schemas" -``` - ---- - -### Task 15: SkillCollaborator Service - -**Files:** -- Create: `backend/app/services/skill_collaborator_service.py` -- Test: `backend/tests/test_api/test_skill_collaborators.py` (later in Task 21) - -- [ ] **Step 1: Implement service** - -```python -# backend/app/services/skill_collaborator_service.py -"""Skill Collaborator Service — add/update/remove collaborators + ownership transfer.""" - -from __future__ import annotations - -import uuid -from typing import List - -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException -from app.common.skill_permissions import check_skill_access -from app.models.skill import Skill -from app.models.skill_collaborator import CollaboratorRole, SkillCollaborator -from app.repositories.skill import SkillRepository -from app.repositories.skill_collaborator import SkillCollaboratorRepository - -from .base import BaseService - - -class SkillCollaboratorService(BaseService[SkillCollaborator]): - def __init__(self, db): - super().__init__(db) - self.repo = SkillCollaboratorRepository(db) - self.skill_repo = SkillRepository(db) - - async def list_collaborators( - self, - skill_id: uuid.UUID, - current_user_id: str, - is_superuser: bool = False, - ) -> List[SkillCollaborator]: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.viewer, - is_superuser=is_superuser, - ) - return await self.repo.list_by_skill(skill_id) - - async def 
add_collaborator( - self, - skill_id: uuid.UUID, - current_user_id: str, - target_user_id: str, - role: CollaboratorRole, - is_superuser: bool = False, - ) -> SkillCollaborator: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.admin, - is_superuser=is_superuser, - ) - - if target_user_id == skill.owner_id: - raise BadRequestException("Cannot add the owner as a collaborator") - - existing = await self.repo.get_by_skill_and_user(skill_id, target_user_id) - if existing: - raise BadRequestException("User is already a collaborator") - - collab = SkillCollaborator( - skill_id=skill_id, - user_id=target_user_id, - role=role, - invited_by=current_user_id, - ) - self.db.add(collab) - await self.db.commit() - await self.db.refresh(collab) - return collab - - async def update_collaborator_role( - self, - skill_id: uuid.UUID, - current_user_id: str, - target_user_id: str, - new_role: CollaboratorRole, - is_superuser: bool = False, - ) -> SkillCollaborator: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.admin, - is_superuser=is_superuser, - ) - - collab = await self.repo.get_by_skill_and_user(skill_id, target_user_id) - if not collab: - raise NotFoundException("Collaborator not found") - - collab.role = new_role - await self.db.commit() - await self.db.refresh(collab) - return collab - - async def remove_collaborator( - self, - skill_id: uuid.UUID, - current_user_id: str, - target_user_id: str, - is_superuser: bool = False, - ) -> None: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.admin, - is_superuser=is_superuser, - ) - - deleted = await self.repo.delete_by_skill_and_user(skill_id, target_user_id) - if not deleted: - raise NotFoundException("Collaborator not found") - await self.db.commit() - - async def transfer_ownership( - self, - skill_id: uuid.UUID, - current_user_id: str, - new_owner_id: str, - ) -> Skill: - """Transfer ownership. 
Only the current owner can do this.""" - skill = await self._get_skill_or_404(skill_id) - - if skill.owner_id != current_user_id: - raise ForbiddenException("Only the owner can transfer ownership") - - # Check new owner doesn't have a skill with the same name - existing = await self.skill_repo.get_by_name_and_owner(skill.name, new_owner_id) - if existing: - raise BadRequestException( - f"New owner already has a skill named '{skill.name}'" - ) - - # Remove new owner from collaborators if present - await self.repo.delete_by_skill_and_user(skill_id, new_owner_id) - - # Add old owner as admin collaborator - old_owner_collab = SkillCollaborator( - skill_id=skill_id, - user_id=current_user_id, - role=CollaboratorRole.admin, - invited_by=current_user_id, - ) - self.db.add(old_owner_collab) - - # Transfer - skill.owner_id = new_owner_id - await self.db.commit() - await self.db.refresh(skill) - return skill - - async def _get_skill_or_404(self, skill_id: uuid.UUID) -> Skill: - skill = await self.skill_repo.get(skill_id) - if not skill: - raise NotFoundException("Skill not found") - return skill -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.services.skill_collaborator_service import SkillCollaboratorService; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/services/skill_collaborator_service.py -git commit -m "feat(services): add SkillCollaboratorService" -``` - ---- - -### Task 16: SkillVersion Service - -**Files:** -- Create: `backend/app/services/skill_version_service.py` - -- [ ] **Step 1: Implement service** - -```python -# backend/app/services/skill_version_service.py -"""Skill Version Service — publish, list, get, delete, restore.""" - -from __future__ import annotations - -import uuid -from datetime import datetime, timezone -from typing import List, Optional - -import semver - -from app.common.exceptions import BadRequestException, NotFoundException -from app.common.skill_permissions import check_skill_access -from app.models.skill import Skill, SkillFile -from app.models.skill_collaborator import CollaboratorRole -from app.models.skill_version import SkillVersion, SkillVersionFile -from app.repositories.skill import SkillFileRepository, SkillRepository -from app.repositories.skill_version import SkillVersionFileRepository, SkillVersionRepository - -from .base import BaseService - - -class SkillVersionService(BaseService[SkillVersion]): - def __init__(self, db): - super().__init__(db) - self.repo = SkillVersionRepository(db) - self.file_repo = SkillVersionFileRepository(db) - self.skill_repo = SkillRepository(db) - self.skill_file_repo = SkillFileRepository(db) - - async def publish_version( - self, - skill_id: uuid.UUID, - current_user_id: str, - version_str: str, - release_notes: Optional[str] = None, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - ) -> SkillVersion: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.publisher, - is_superuser=is_superuser, - token_scopes=token_scopes, required_scope="skills:publish", - ) - - # Validate semver format - try: - new_ver = semver.Version.parse(version_str) - except ValueError: - raise BadRequestException( - f"Invalid version format: '{version_str}'. 
Must be MAJOR.MINOR.PATCH" - ) - # Reject pre-release / build metadata - if new_ver.prerelease or new_ver.build: - raise BadRequestException("Pre-release and build metadata are not supported") - - # Check > highest existing - highest_str = await self.repo.get_highest_version_str(skill_id) - if highest_str: - highest = semver.Version.parse(highest_str) - if new_ver <= highest: - raise BadRequestException( - f"Version {version_str} must be greater than current highest {highest_str}" - ) - - # Snapshot - sv = SkillVersion( - skill_id=skill_id, - version=version_str, - release_notes=release_notes, - skill_name=skill.name, - skill_description=skill.description, - content=skill.content, - tags=list(skill.tags) if skill.tags else [], - meta_data=dict(skill.meta_data) if skill.meta_data else {}, - allowed_tools=list(skill.allowed_tools) if skill.allowed_tools else [], - compatibility=skill.compatibility, - license=skill.license, - published_by_id=current_user_id, - published_at=datetime.now(timezone.utc), - ) - self.db.add(sv) - await self.db.flush() - await self.db.refresh(sv) - - # Copy files - skill_files = await self.skill_file_repo.list_by_skill(skill_id) - for sf in skill_files: - vf = SkillVersionFile( - version_id=sv.id, - path=sf.path, - file_name=sf.file_name, - file_type=sf.file_type, - content=sf.content, - storage_type=sf.storage_type, - storage_key=sf.storage_key, - size=sf.size, - ) - self.db.add(vf) - - await self.db.commit() - await self.db.refresh(sv) - return sv - - async def list_versions( - self, - skill_id: uuid.UUID, - current_user_id: str, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - ) -> List[SkillVersion]: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.viewer, - is_superuser=is_superuser, - token_scopes=token_scopes, required_scope="skills:read", - ) - return await self.repo.list_by_skill(skill_id) - - async def get_version( - self, - skill_id: uuid.UUID, - version_str: str, - current_user_id: str, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - ) -> SkillVersion: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.viewer, - is_superuser=is_superuser, - token_scopes=token_scopes, required_scope="skills:read", - ) - sv = await self.repo.get_by_version(skill_id, version_str) - if not sv: - raise NotFoundException(f"Version {version_str} not found") - return sv - - async def get_latest_version( - self, - skill_id: uuid.UUID, - current_user_id: str, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - ) -> SkillVersion: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.viewer, - is_superuser=is_superuser, - token_scopes=token_scopes, required_scope="skills:read", - ) - sv = await self.repo.get_latest(skill_id) - if not sv: - raise NotFoundException("No published versions found") - return sv - - async def delete_version( - self, - skill_id: uuid.UUID, - version_str: str, - current_user_id: str, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - ) -> None: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.admin, - is_superuser=is_superuser, - token_scopes=token_scopes, required_scope="skills:admin", - ) - sv = await self.repo.get_by_version(skill_id, 
version_str) - if not sv: - raise NotFoundException(f"Version {version_str} not found") - await self.db.delete(sv) - await self.db.commit() - - async def restore_draft( - self, - skill_id: uuid.UUID, - version_str: str, - current_user_id: str, - is_superuser: bool = False, - token_scopes: Optional[List[str]] = None, - ) -> Skill: - skill = await self._get_skill_or_404(skill_id) - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.publisher, - is_superuser=is_superuser, - token_scopes=token_scopes, required_scope="skills:write", - ) - sv = await self.repo.get_by_version(skill_id, version_str) - if not sv: - raise NotFoundException(f"Version {version_str} not found") - - # Overwrite draft - skill.name = sv.skill_name - skill.description = sv.skill_description - skill.content = sv.content - skill.tags = list(sv.tags) if sv.tags else [] - skill.meta_data = dict(sv.meta_data) if sv.meta_data else {} - skill.allowed_tools = list(sv.allowed_tools) if sv.allowed_tools else [] - skill.compatibility = sv.compatibility - skill.license = sv.license - - # Replace draft files - await self.skill_file_repo.delete_by_skill(skill_id) - version_files = await self.file_repo.list_by_version(sv.id) - for vf in version_files: - sf = SkillFile( - skill_id=skill_id, - path=vf.path, - file_name=vf.file_name, - file_type=vf.file_type, - content=vf.content, - storage_type=vf.storage_type, - storage_key=vf.storage_key, - size=vf.size, - ) - self.db.add(sf) - - await self.db.commit() - await self.db.refresh(skill) - return skill - - async def _get_skill_or_404(self, skill_id: uuid.UUID) -> Skill: - skill = await self.skill_repo.get_with_files(skill_id) - if not skill: - raise NotFoundException("Skill not found") - return skill -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.services.skill_version_service import SkillVersionService; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/services/skill_version_service.py -git commit -m "feat(services): add SkillVersionService with publish/restore/delete" -``` - ---- - -### Task 17: PlatformToken Service - -**Files:** -- Create: `backend/app/services/platform_token_service.py` - -- [ ] **Step 1: Implement service** - -```python -# backend/app/services/platform_token_service.py -"""PlatformToken Service — create, list, revoke.""" - -from __future__ import annotations - -import hashlib -import secrets -import uuid -from datetime import datetime -from typing import List, Optional, Tuple - -from app.common.exceptions import BadRequestException, ForbiddenException, NotFoundException -from app.models.platform_token import PlatformToken -from app.repositories.platform_token import PlatformTokenRepository - -from .base import BaseService - -MAX_ACTIVE_TOKENS_PER_USER = 50 -TOKEN_PREFIX = "sk_" - - -class PlatformTokenService(BaseService[PlatformToken]): - def __init__(self, db): - super().__init__(db) - self.repo = PlatformTokenRepository(db) - - async def create_token( - self, - user_id: str, - name: str, - scopes: List[str], - resource_type: Optional[str] = None, - resource_id: Optional[uuid.UUID] = None, - expires_at: Optional[datetime] = None, - ) -> Tuple[PlatformToken, str]: - """Create a new token. 
Returns (token_record, plaintext_token).""" - # Check limit - active_count = await self.repo.count_active_by_user(user_id) - if active_count >= MAX_ACTIVE_TOKENS_PER_USER: - raise BadRequestException( - f"Maximum of {MAX_ACTIVE_TOKENS_PER_USER} active tokens reached" - ) - - # Generate token - raw_secret = secrets.token_urlsafe(36) # ~48 chars - plaintext = f"{TOKEN_PREFIX}{raw_secret}" - token_hash = hashlib.sha256(plaintext.encode()).hexdigest() - token_prefix = plaintext[:12] - - pt = PlatformToken( - user_id=user_id, - name=name, - token_hash=token_hash, - token_prefix=token_prefix, - scopes=scopes, - resource_type=resource_type, - resource_id=resource_id, - expires_at=expires_at, - is_active=True, - ) - self.db.add(pt) - await self.db.commit() - await self.db.refresh(pt) - return pt, plaintext - - async def list_tokens(self, user_id: str) -> List[PlatformToken]: - return await self.repo.list_by_user(user_id) - - async def revoke_token( - self, - token_id: uuid.UUID, - user_id: str, - ) -> None: - pt = await self.repo.get(token_id) - if not pt: - raise NotFoundException("Token not found") - if pt.user_id != user_id: - raise ForbiddenException("You can only revoke your own tokens") - pt.is_active = False - await self.db.commit() -``` - -- [ ] **Step 2: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.services.platform_token_service import PlatformTokenService; print('OK')"` -Expected: `OK` - -- [ ] **Step 3: Commit** - -```bash -git add backend/app/services/platform_token_service.py -git commit -m "feat(services): add PlatformTokenService with create/list/revoke" -``` - ---- - -### Task 18: Update SkillService — Replace Hardcoded Permission Checks - -**Files:** -- Modify: `backend/app/services/skill_service.py` - -- [ ] **Step 1: Add imports at top of skill_service.py** - -Add after the existing imports: -```python -from app.common.skill_permissions import check_skill_access -from app.models.skill_collaborator import CollaboratorRole -``` - -- [ ] **Step 2: Replace permission checks in `get_skill()`** - -Replace lines 66-68: -```python -# OLD -if skill.owner_id and skill.owner_id != current_user_id and not skill.is_public: - raise ForbiddenException("You don't have permission to access this skill") -``` -with: -```python -# NEW -if current_user_id: - await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.viewer, - ) -elif not skill.is_public: - raise ForbiddenException("You don't have permission to access this skill") -``` - -- [ ] **Step 3: Replace permission check in `update_skill()`** - -Replace lines 310-312: -```python -# OLD -if skill.owner_id != current_user_id: - raise ForbiddenException("You can only update your own skills") -``` -with: -```python -# NEW -await check_skill_access( - self.db, skill, current_user_id, CollaboratorRole.editor, -) -``` - -- [ ] **Step 4: Replace permission check in `delete_skill()`** - -Replace lines 473-475: -```python -# OLD -if skill.owner_id != current_user_id: - raise ForbiddenException("You can only delete your own skills") -``` -with: -```python -# NEW — only owner can delete -if skill.owner_id != current_user_id: - raise ForbiddenException("Only the owner can delete a skill") -``` - -(Keep owner-only for delete — spec says owner only.) 
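-
-All of these replacement calls defer to the role ordering that `check_skill_access()` (Task 6) enforces. As a reminder of the intended semantics, a minimal sketch only (assuming roles rank viewer < editor < publisher < admin; the real implementation lives in `app/common/skill_permissions.py`):
-
-```python
-# Sketch of the role comparison check_skill_access() is expected to perform.
-# Assumption: CollaboratorRole members rank viewer < editor < publisher < admin.
-from app.models.skill_collaborator import CollaboratorRole
-
-ROLE_RANK = {
-    CollaboratorRole.viewer: 0,
-    CollaboratorRole.editor: 1,
-    CollaboratorRole.publisher: 2,
-    CollaboratorRole.admin: 3,
-}
-
-
-def role_satisfies(actual: CollaboratorRole, required: CollaboratorRole) -> bool:
-    """True if the collaborator's role meets or exceeds the required role."""
-    return ROLE_RANK[actual] >= ROLE_RANK[required]
-```
-
-So passing `CollaboratorRole.editor` above admits editors, publishers, and admins, while viewers are rejected.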
-
-- [ ] **Step 5: Replace permission checks in `add_file()`, `delete_file()`, `update_file()`**
-
-In `add_file()` (around line 501-503), replace:
-```python
-if skill.owner_id != current_user_id:
-    raise ForbiddenException("You can only add files to your own skills")
-```
-with:
-```python
-await check_skill_access(
-    self.db, skill, current_user_id, CollaboratorRole.editor,
-)
-```
-
-Apply the same pattern to `delete_file()` (around line 559-561) and `update_file()` (around line 583-585).
-
-- [ ] **Step 6: Verify syntax**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.services.skill_service import SkillService; print('OK')"`
-Expected: `OK`
-
-- [ ] **Step 7: Commit**
-
-```bash
-git add backend/app/services/skill_service.py
-git commit -m "refactor(services): replace hardcoded owner_id checks with check_skill_access()"
-```
-
----
-
-### Task 19: Skill Versions API Routes
-
-**Files:**
-- Create: `backend/app/api/v1/skill_versions.py`
-
-- [ ] **Step 1: Write the route file**
-
-```python
-"""Skill Version API routes."""
-
-from __future__ import annotations
-
-import uuid
-
-from fastapi import APIRouter, Depends
-from sqlalchemy.ext.asyncio import AsyncSession
-
-from app.common.auth_dependency import AuthContext, get_current_user_or_token
-from app.common.dependencies import get_current_user
-from app.core.database import get_db
-from app.models.auth import AuthUser as User
-from app.schemas.skill_version import (
-    VersionPublishRequest,
-    VersionRestoreRequest,
-    VersionSchema,
-    VersionSummarySchema,
-)
-from app.services.skill_version_service import SkillVersionService
-
-router = APIRouter(prefix="/v1/skills", tags=["Skill Versions"])
-
-
-@router.post("/{skill_id}/versions")
-async def publish_version(
-    skill_id: uuid.UUID,
-    payload: VersionPublishRequest,
-    db: AsyncSession = Depends(get_db),
-    auth: AuthContext = Depends(get_current_user_or_token),
-):
-    """Publish a new version (snapshot of the current draft)."""
-    service = SkillVersionService(db)
-    version = await service.publish_version(
-        skill_id=skill_id,
-        current_user_id=auth.user.id,
-        version_str=payload.version,
-        release_notes=payload.release_notes,
-        is_superuser=auth.user.is_superuser,
-        token_scopes=auth.scopes,
-    )
-    return {
-        "success": True,
-        "data": VersionSchema.model_validate(version).model_dump(),
-    }
-
-
-@router.get("/{skill_id}/versions")
-async def list_versions(
-    skill_id: uuid.UUID,
-    db: AsyncSession = Depends(get_db),
-    auth: AuthContext = Depends(get_current_user_or_token),
-):
-    """List all published versions, newest first."""
-    service = SkillVersionService(db)
-    versions = await service.list_versions(
-        skill_id=skill_id,
-        current_user_id=auth.user.id,
-        is_superuser=auth.user.is_superuser,
-        token_scopes=auth.scopes,
-    )
-    return {
-        "success": True,
-        "data": [VersionSummarySchema.model_validate(v).model_dump() for v in versions],
-    }
-
-
-@router.get("/{skill_id}/versions/latest")
-async def get_latest_version(
-    skill_id: uuid.UUID,
-    db: AsyncSession = Depends(get_db),
-    auth: AuthContext = Depends(get_current_user_or_token),
-):
-    """Get the latest published version, including files."""
-    service = SkillVersionService(db)
-    version = await service.get_latest_version(
-        skill_id=skill_id,
-        current_user_id=auth.user.id,
-        is_superuser=auth.user.is_superuser,
-        token_scopes=auth.scopes,
-    )
-    return {
-        "success": True,
-        "data": VersionSchema.model_validate(version).model_dump(),
-    }
-
-
-@router.get("/{skill_id}/versions/{version}")
-async def get_version(
-    skill_id: uuid.UUID,
-    version: str,
-    db: AsyncSession = Depends(get_db),
-    auth: AuthContext = Depends(get_current_user_or_token),
-):
-    """Get a specific version, including files."""
-    service = SkillVersionService(db)
-    ver = await service.get_version(
-        skill_id=skill_id,
-        version_str=version,
-        current_user_id=auth.user.id,
-        is_superuser=auth.user.is_superuser,
-        token_scopes=auth.scopes,
-    )
-    return {
-        "success": True,
-        "data": VersionSchema.model_validate(ver).model_dump(),
-    }
-
-
-@router.delete("/{skill_id}/versions/{version}")
-async def delete_version(
-    skill_id: uuid.UUID,
-    version: str,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Delete a version (requires admin or above; session auth only)."""
-    service = SkillVersionService(db)
-    await service.delete_version(
-        skill_id=skill_id,
-        version_str=version,
-        current_user_id=current_user.id,
-        is_superuser=current_user.is_superuser,
-    )
-    return {"success": True}
-
-
-@router.post("/{skill_id}/restore")
-async def restore_version(
-    skill_id: uuid.UUID,
-    payload: VersionRestoreRequest,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Restore the draft from a historical version (session auth only)."""
-    service = SkillVersionService(db)
-    skill = await service.restore_draft(
-        skill_id=skill_id,
-        version_str=payload.version,
-        current_user_id=current_user.id,
-        is_superuser=current_user.is_superuser,
-    )
-    from app.schemas.skill import SkillSchema
-
-    return {
-        "success": True,
-        "data": SkillSchema.model_validate(skill).model_dump(),
-    }
-```
-
-- [ ] **Step 2: Verify syntax**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.api.v1.skill_versions import router; print(len(router.routes), 'routes')"`
-Expected: `6 routes`
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/app/api/v1/skill_versions.py
-git commit -m "feat(api): add skill versions API routes"
-```
-
----
-
-### Task 20: Skill Collaborators API Routes
-
-**Files:**
-- Create: `backend/app/api/v1/skill_collaborators.py`
-
-- [ ] **Step 1: Write the route file**
-
-```python
-"""Skill Collaborator API routes."""
-
-from __future__ import annotations
-
-import uuid
-
-from fastapi import APIRouter, Depends
-from sqlalchemy.ext.asyncio import AsyncSession
-
-from app.common.dependencies import get_current_user
-from app.core.database import get_db
-from app.models.auth import AuthUser as User
-from app.schemas.skill_collaborator import (
-    CollaboratorCreate,
-    CollaboratorSchema,
-    CollaboratorUpdate,
-    TransferOwnershipRequest,
-)
-from app.services.skill_collaborator_service import SkillCollaboratorService
-
-router = APIRouter(prefix="/v1/skills", tags=["Skill Collaborators"])
-
-
-@router.get("/{skill_id}/collaborators")
-async def list_collaborators(
-    skill_id: uuid.UUID,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """List all collaborators for a skill."""
-    service = SkillCollaboratorService(db)
-    collaborators = await service.list_collaborators(
-        skill_id=skill_id,
-        current_user_id=current_user.id,
-        is_superuser=current_user.is_superuser,
-    )
-    return {
-        "success": True,
-        "data": [CollaboratorSchema.model_validate(c).model_dump() for c in collaborators],
-    }
-
-
-@router.post("/{skill_id}/collaborators")
-async def add_collaborator(
-    skill_id: uuid.UUID,
-    payload: CollaboratorCreate,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Add a collaborator (requires admin or above)."""
-    service = SkillCollaboratorService(db)
-    collaborator = await service.add_collaborator(
-        skill_id=skill_id,
-        current_user_id=current_user.id,
-        target_user_id=payload.user_id,
-        role=payload.role,
-        is_superuser=current_user.is_superuser,
-    )
-    return {
-        "success": True,
-        "data": CollaboratorSchema.model_validate(collaborator).model_dump(),
-    }
-
-
-@router.put("/{skill_id}/collaborators/{target_user_id}")
-async def update_collaborator(
-    skill_id: uuid.UUID,
-    target_user_id: str,
-    payload: CollaboratorUpdate,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Update a collaborator's role (requires admin or above)."""
-    service = SkillCollaboratorService(db)
-    collaborator = await service.update_collaborator_role(
-        skill_id=skill_id,
-        current_user_id=current_user.id,
-        target_user_id=target_user_id,
-        new_role=payload.role,
-        is_superuser=current_user.is_superuser,
-    )
-    return {
-        "success": True,
-        "data": CollaboratorSchema.model_validate(collaborator).model_dump(),
-    }
-
-
-@router.delete("/{skill_id}/collaborators/{target_user_id}")
-async def remove_collaborator(
-    skill_id: uuid.UUID,
-    target_user_id: str,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Remove a collaborator (requires admin or above)."""
-    service = SkillCollaboratorService(db)
-    await service.remove_collaborator(
-        skill_id=skill_id,
-        current_user_id=current_user.id,
-        target_user_id=target_user_id,
-        is_superuser=current_user.is_superuser,
-    )
-    return {"success": True}
-
-
-@router.post("/{skill_id}/transfer")
-async def transfer_ownership(
-    skill_id: uuid.UUID,
-    payload: TransferOwnershipRequest,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Transfer ownership (owner only; superusers are not exempt)."""
-    service = SkillCollaboratorService(db)
-    await service.transfer_ownership(
-        skill_id=skill_id,
-        current_user_id=current_user.id,
-        new_owner_id=payload.new_owner_id,
-    )
-    return {"success": True}
-```
-
-Note: `transfer_ownership()` in the service (Task 15) takes no `is_superuser` argument, so none is passed here.
-
-- [ ] **Step 2: Verify syntax**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.api.v1.skill_collaborators import router; print(len(router.routes), 'routes')"`
-Expected: `5 routes`
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/app/api/v1/skill_collaborators.py
-git commit -m "feat(api): add skill collaborators API routes"
-```
-
----
-
-### Task 21: Token API Routes
-
-**Files:**
-- Create: `backend/app/api/v1/tokens.py`
-
-- [ ] **Step 1: Write the route file**
-
-```python
-"""PlatformToken API routes.
-
-Token management is session-auth only — PlatformToken cannot manage PlatformToken.
-"""
-
-from __future__ import annotations
-
-import uuid
-
-from fastapi import APIRouter, Depends
-from sqlalchemy.ext.asyncio import AsyncSession
-
-from app.common.dependencies import get_current_user
-from app.core.database import get_db
-from app.models.auth import AuthUser as User
-from app.schemas.platform_token import (
-    TokenCreate,
-    TokenCreateResponse,
-    TokenSchema,
-)
-from app.services.platform_token_service import PlatformTokenService
-
-router = APIRouter(prefix="/v1/tokens", tags=["Tokens"])
-
-
-@router.post("")
-async def create_token(
-    payload: TokenCreate,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Create a token (the plaintext is returned here, once only)."""
-    service = PlatformTokenService(db)
-    token_record, raw_token = await service.create_token(
-        user_id=current_user.id,
-        name=payload.name,
-        scopes=payload.scopes,
-        resource_type=payload.resource_type,
-        resource_id=payload.resource_id,
-        expires_at=payload.expires_at,
-    )
-    # Build the response explicitly: the raw token is never stored, so it
-    # cannot be read off token_record and must be injected here.
-    resp = TokenCreateResponse(
-        id=token_record.id,
-        name=token_record.name,
-        token=raw_token,
-        token_prefix=token_record.token_prefix,
-        scopes=token_record.scopes,
-        expires_at=token_record.expires_at,
-    )
-    return {"success": True, "data": resp.model_dump()}
-
-
-@router.get("")
-async def list_tokens(
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """List my tokens (no plaintext; the prefix is shown instead)."""
-    service = PlatformTokenService(db)
-    tokens = await service.list_tokens(user_id=current_user.id)
-    return {
-        "success": True,
-        "data": [TokenSchema.model_validate(t).model_dump() for t in tokens],
-    }
-
-
-@router.delete("/{token_id}")
-async def revoke_token(
-    token_id: uuid.UUID,
-    db: AsyncSession = Depends(get_db),
-    current_user: User = Depends(get_current_user),
-):
-    """Revoke a token (soft delete: is_active = False)."""
-    service = PlatformTokenService(db)
-    await service.revoke_token(token_id=token_id, user_id=current_user.id)
-    return {"success": True}
-```
-
-- [ ] **Step 2: Verify syntax**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.api.v1.tokens import router; print(len(router.routes), 'routes')"`
-Expected: `3 routes`
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/app/api/v1/tokens.py
-git commit -m "feat(api): add platform token API routes"
-```
-
----
-
-### Task 22: Register New Routers
-
-**Files:**
-- Modify: `backend/app/api/v1/__init__.py`
-
-- [ ] **Step 1: Add imports**
-
-After `from .skills import router as skills_router` (line 31), add:
-
-```python
-from .skill_collaborators import router as skill_collaborators_router
-from .skill_versions import router as skill_versions_router
-from .tokens import router as tokens_router
-```
-
-- [ ] **Step 2: Add to ROUTERS list**
-
-After `skills_router,` (line 58), add:
-
-```python
-    skill_versions_router,
-    skill_collaborators_router,
-    tokens_router,
-```
-
-- [ ] **Step 3: Verify import**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.api.v1 import api_router; print(len(api_router.routes), 'total routes')"`
-Expected: prints route count (should be previous count + 14 new routes)
-
-- [ ] **Step 4: Commit**
-
-```bash
-git add backend/app/api/v1/__init__.py
-git commit -m "feat(api): register skill versions, collaborators, and token routers"
-```
-
----
-
-### Task 23: Update SkillSchema — Add `latest_version` Field
-
-**Files:**
-- Modify: `backend/app/schemas/skill.py:65-133`
-
-- [ ] **Step 1: Add `latest_version` to SkillSchema**
-
-In `SkillSchema` class (line 65), add a new optional
field after `files`: - -```python - files: Optional[List[SkillFileSchema]] = None - latest_version: Optional[str] = None -``` - -- [ ] **Step 2: Update `map_meta_data_from_attributes` validator** - -In the `model_validator` method (line 103-129), update the loop that gets extra attributes (line 120) to include `latest_version`: - -Replace: -```python - for key in ["id", "created_at", "updated_at", "files"]: -``` -with: -```python - for key in ["id", "created_at", "updated_at", "files", "latest_version"]: -``` - -- [ ] **Step 3: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.schemas.skill import SkillSchema; print(SkillSchema.model_fields.keys())"` -Expected: includes `latest_version` - -- [ ] **Step 4: Commit** - -```bash -git add backend/app/schemas/skill.py -git commit -m "feat(schemas): add latest_version field to SkillSchema" -``` - ---- - -### Task 24: Populate `latest_version` in SkillService - -**Files:** -- Modify: `backend/app/services/skill_service.py` - -Note: Task 11 already handles the `list_by_user()` collaborator subquery. This task adds `latest_version` population. - -- [ ] **Step 1: Add helper to populate latest_version** - -In `SkillService`, add a helper method: - -```python -from app.repositories.skill_version import SkillVersionRepository - -async def _attach_latest_version(self, skill): - """Attach latest_version string to skill for API response.""" - ver_repo = SkillVersionRepository(self.db) - latest = await ver_repo.get_latest(skill.id) - skill.latest_version = latest.version if latest else None - return skill -``` - -- [ ] **Step 2: Call helper in `get_skill()`** - -At the end of `get_skill()`, before returning the skill, call: -```python -skill = await self._attach_latest_version(skill) -``` - -- [ ] **Step 3: Verify syntax** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -c "from app.services.skill_service import SkillService; print('OK')"` -Expected: `OK` - -- [ ] **Step 4: Commit** - -```bash -git add backend/app/services/skill_service.py -git commit -m "feat(services): populate latest_version on skill responses" -``` - ---- - -### Task 25: Unit Tests — Skill Permissions Utility - -**Files:** -- Create: `backend/tests/test_services/__init__.py` -- Create: `backend/tests/test_services/test_skill_permissions.py` - -- [ ] **Step 1: Create `__init__.py`** - -```python -# (empty) -``` - -- [ ] **Step 2: Write tests for `check_skill_access`** - -```python -"""Unit tests for check_skill_access permission utility.""" - -from __future__ import annotations - -import uuid -from unittest.mock import AsyncMock, MagicMock - -import pytest - -from app.common.skill_permissions import check_skill_access -from app.models.skill_collaborator import CollaboratorRole - - -def _mock_skill(owner_id: str = "owner-1", is_public: bool = False): - s = MagicMock() - s.id = uuid.uuid4() - s.owner_id = owner_id - s.is_public = is_public - return s - - -def _mock_db(collaborator_role=None): - """Return an AsyncMock db session. - - If collaborator_role is given, simulate a SkillCollaborator record. 
- """ - db = AsyncMock() - result_mock = MagicMock() - if collaborator_role is not None: - collab = MagicMock() - collab.role = collaborator_role - result_mock.scalar_one_or_none.return_value = collab - else: - result_mock.scalar_one_or_none.return_value = None - db.execute.return_value = result_mock - return db - - -class TestCheckSkillAccess: - """Verify the unified permission checking logic.""" - - @pytest.mark.asyncio - async def test_owner_always_passes(self): - skill = _mock_skill(owner_id="user-1") - db = _mock_db() - # Owner should pass even with admin requirement - await check_skill_access(db, skill, "user-1", CollaboratorRole.admin) - - @pytest.mark.asyncio - async def test_superuser_always_passes(self): - skill = _mock_skill(owner_id="other") - db = _mock_db() - # Superuser bypasses all role checks via is_superuser param - await check_skill_access( - db, skill, "superadmin", CollaboratorRole.admin, - is_superuser=True, - ) - - @pytest.mark.asyncio - async def test_collaborator_with_sufficient_role(self): - skill = _mock_skill(owner_id="other") - db = _mock_db(collaborator_role=CollaboratorRole.editor) - await check_skill_access(db, skill, "collab-1", CollaboratorRole.editor) - - @pytest.mark.asyncio - async def test_collaborator_with_insufficient_role(self): - skill = _mock_skill(owner_id="other") - db = _mock_db(collaborator_role=CollaboratorRole.viewer) - from app.common.exceptions import ForbiddenException - - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "collab-1", CollaboratorRole.editor) - - @pytest.mark.asyncio - async def test_public_skill_viewer_access(self): - skill = _mock_skill(owner_id="other", is_public=True) - db = _mock_db() - await check_skill_access(db, skill, "random-user", CollaboratorRole.viewer) - - @pytest.mark.asyncio - async def test_public_skill_edit_denied(self): - skill = _mock_skill(owner_id="other", is_public=True) - db = _mock_db() - from app.common.exceptions import ForbiddenException - - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "random-user", CollaboratorRole.editor) - - @pytest.mark.asyncio - async def test_no_access_denied(self): - skill = _mock_skill(owner_id="other", is_public=False) - db = _mock_db() - from app.common.exceptions import ForbiddenException - - with pytest.raises(ForbiddenException): - await check_skill_access(db, skill, "stranger", CollaboratorRole.viewer) -``` - -- [ ] **Step 3: Run tests** - -Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -m pytest tests/test_services/test_skill_permissions.py -v` -Expected: All 7 tests pass - -- [ ] **Step 4: Commit** - -```bash -git add backend/tests/test_services/ -git commit -m "test: add unit tests for check_skill_access permission utility" -``` - ---- - -### Task 26: Unit Tests — Skill Version Service - -**Files:** -- Create: `backend/tests/test_services/test_skill_version_service.py` - -- [ ] **Step 1: Write tests** - -```python -"""Unit tests for SkillVersionService.""" - -from __future__ import annotations - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - - -def _mock_skill(owner_id="user-1", name="test-skill", content="body"): - s = MagicMock() - s.id = uuid.uuid4() - s.owner_id = owner_id - s.name = name - s.description = "desc" - s.content = content - s.tags = ["a"] - s.meta_data = {} - s.allowed_tools = [] - s.compatibility = None - s.license = None - s.is_public = False - s.files = [] - return s - - -def _mock_db(): - db = AsyncMock() - 
    db.commit = AsyncMock()
-    db.flush = AsyncMock()
-    db.refresh = AsyncMock()
-    db.add = MagicMock()
-    return db
-
-
-class TestSkillVersionServicePublish:
-    """Test version publishing logic."""
-
-    @pytest.mark.asyncio
-    async def test_publish_validates_semver_format(self):
-        """Invalid semver should raise BadRequestException."""
-        from app.common.exceptions import BadRequestException
-
-        db = _mock_db()
-        skill = _mock_skill()
-        with patch("app.services.skill_version_service.SkillRepository") as MockSkillRepo, \
-             patch("app.services.skill_version_service.SkillVersionRepository"), \
-             patch("app.services.skill_version_service.check_skill_access", new_callable=AsyncMock):
-            # _get_skill_or_404 loads the skill via get_with_files(), so mock that
-            MockSkillRepo.return_value.get_with_files = AsyncMock(return_value=skill)
-            from app.services.skill_version_service import SkillVersionService
-
-            service = SkillVersionService(db)
-            with pytest.raises(BadRequestException, match="Invalid version format"):
-                await service.publish_version(
-                    skill_id=skill.id, current_user_id="user-1",
-                    version_str="invalid", release_notes="",
-                )
-
-    @pytest.mark.asyncio
-    async def test_publish_rejects_lower_version(self):
-        """New version must be greater than existing highest."""
-        from app.common.exceptions import BadRequestException
-
-        db = _mock_db()
-        skill = _mock_skill()
-
-        with patch("app.services.skill_version_service.SkillRepository") as MockSkillRepo, \
-             patch("app.services.skill_version_service.SkillVersionRepository") as MockVerRepo, \
-             patch("app.services.skill_version_service.check_skill_access", new_callable=AsyncMock):
-            MockSkillRepo.return_value.get_with_files = AsyncMock(return_value=skill)
-            # get_highest_version_str() returns a plain semver string
-            MockVerRepo.return_value.get_highest_version_str = AsyncMock(return_value="2.0.0")
-            from app.services.skill_version_service import SkillVersionService
-
-            service = SkillVersionService(db)
-            with pytest.raises(BadRequestException, match="greater"):
-                await service.publish_version(
-                    skill_id=skill.id, current_user_id="user-1",
-                    version_str="1.0.0", release_notes="",
-                )
-```
-
-- [ ] **Step 2: Run tests**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -m pytest tests/test_services/test_skill_version_service.py -v`
-Expected: All 2 tests pass
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/tests/test_services/test_skill_version_service.py
-git commit -m "test: add unit tests for SkillVersionService publish logic"
-```
-
----
-
-### Task 27: Unit Tests — Platform Token Service
-
-**Files:**
-- Create: `backend/tests/test_services/test_platform_token_service.py`
-
-- [ ] **Step 1: Write tests**
-
-```python
-"""Unit tests for PlatformTokenService."""
-
-from __future__ import annotations
-
-import uuid
-from unittest.mock import AsyncMock, MagicMock, patch
-
-import pytest
-
-
-def _mock_db():
-    db = AsyncMock()
-    db.commit = AsyncMock()
-    db.flush = AsyncMock()
-    db.refresh = AsyncMock()
-    db.add = MagicMock()
-    return db
-
-
-class TestPlatformTokenServiceCreate:
-    """Test token creation logic."""
-
-    @pytest.mark.asyncio
-    async def test_create_returns_raw_token_starting_with_sk(self):
-        """Created token should start with 'sk_' prefix."""
-        db = _mock_db()
-        with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo:
-            # count_active_by_user() is async, so it needs an AsyncMock
-            MockRepo.return_value.count_active_by_user = AsyncMock(return_value=0)
-            from app.services.platform_token_service import PlatformTokenService
-
-            # The service constructs the PlatformToken itself and only calls
-            # db.add/commit/refresh, so no repo.create mock is needed.
-            service = PlatformTokenService(db)
-            record, raw_token = await service.create_token(
-                user_id="user-1",
-                name="test",
-                scopes=["skills:read"],
-            )
-            assert raw_token.startswith("sk_")
-            assert len(raw_token) > 12
-            assert record.token_prefix == raw_token[:12]
-
-    @pytest.mark.asyncio
-    async def test_create_rejects_when_limit_exceeded(self):
-        """Should raise BadRequestException when user has 50 active tokens."""
-        from app.common.exceptions import BadRequestException
-
-        db = _mock_db()
-        with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo:
-            MockRepo.return_value.count_active_by_user = AsyncMock(return_value=50)
-            from app.services.platform_token_service import PlatformTokenService
-
-            service = PlatformTokenService(db)
-            with pytest.raises(BadRequestException, match="50"):
-                await service.create_token(
-                    user_id="user-1",
-                    name="test",
-                    scopes=["skills:read"],
-                )
-
-
-class TestPlatformTokenServiceRevoke:
-    """Test token revocation logic."""
-
-    @pytest.mark.asyncio
-    async def test_revoke_sets_inactive(self):
-        """Revoke should set is_active to False."""
-        db = _mock_db()
-        token = MagicMock()
-        token.id = uuid.uuid4()
-        token.user_id = "user-1"
-        token.is_active = True
-        with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo:
-            # BaseRepository.get() is async as well
-            MockRepo.return_value.get = AsyncMock(return_value=token)
-            from app.services.platform_token_service import PlatformTokenService
-
-            service = PlatformTokenService(db)
-            await service.revoke_token(token_id=token.id, user_id="user-1")
-            assert token.is_active is False
-
-    @pytest.mark.asyncio
-    async def test_revoke_wrong_user_denied(self):
-        """Should raise ForbiddenException when revoking another user's token."""
-        from app.common.exceptions import ForbiddenException
-
-        db = _mock_db()
-        token = MagicMock()
-        token.id = uuid.uuid4()
-        token.user_id = "user-1"
-        token.is_active = True
-        with patch("app.services.platform_token_service.PlatformTokenRepository") as MockRepo:
-            MockRepo.return_value.get = AsyncMock(return_value=token)
-            from app.services.platform_token_service import PlatformTokenService
-
-            service = PlatformTokenService(db)
-            with pytest.raises(ForbiddenException):
-                await service.revoke_token(token_id=token.id, user_id="user-2")
-```
-
-- [ ] **Step 2: Run tests**
-
-Run: `cd /Users/yuzhenjiang1/Downloads/workspace/JoySafeter/backend && python -m pytest tests/test_services/test_platform_token_service.py -v`
-Expected: All 4 tests pass
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/tests/test_services/test_platform_token_service.py
-git commit -m "test: add unit tests for PlatformTokenService"
-```
-
----
-
-## Summary
-
-| Task | Component | Files |
-|------|-----------|-------|
-| 1 | CollaboratorRole enum + SkillCollaborator model | `models/skill_collaborator.py` |
-| 2 | SkillVersion + SkillVersionFile models | `models/skill_version.py` |
-| 3 | PlatformToken model | `models/platform_token.py` |
-| 4 | Register models in `__init__` | `models/__init__.py` |
-| 5 | Alembic migration | `alembic/versions/...` |
-| 6 | Skill permissions utility | `common/skill_permissions.py` |
-| 7 | Dual-mode auth dependency | `common/auth_dependency.py` |
-| 8 | SkillCollaborator repository | `repositories/skill_collaborator.py` |
-| 9 | SkillVersion + SkillVersionFile repository | `repositories/skill_version.py` |
-| 10 | PlatformToken repository | `repositories/platform_token.py` |
-| 11 | Extend `list_by_user()` with collaborator subquery | `repositories/skill.py` |
-| 12 | SkillCollaborator schemas | `schemas/skill_collaborator.py` |
-| 13 | SkillVersion schemas |
`schemas/skill_version.py` | -| 14 | PlatformToken schemas | `schemas/platform_token.py` | -| 15 | SkillCollaboratorService | `services/skill_collaborator_service.py` | -| 16 | SkillVersionService | `services/skill_version_service.py` | -| 17 | PlatformTokenService | `services/platform_token_service.py` | -| 18 | Update SkillService permission checks | `services/skill_service.py` | -| 19 | Skill Versions API routes | `api/v1/skill_versions.py` | -| 20 | Skill Collaborators API routes | `api/v1/skill_collaborators.py` | -| 21 | Token API routes | `api/v1/tokens.py` | -| 22 | Register new routers | `api/v1/__init__.py` | -| 23 | SkillSchema `latest_version` field | `schemas/skill.py` | -| 24 | Populate `latest_version` in SkillService | `services/skill_service.py` | -| 25 | Tests — skill permissions | `tests/test_services/test_skill_permissions.py` | -| 26 | Tests — version service | `tests/test_services/test_skill_version_service.py` | -| 27 | Tests — token service | `tests/test_services/test_platform_token_service.py` | diff --git a/docs/superpowers/plans/2026-03-21-chat-layout-interaction-alignment.md b/docs/superpowers/plans/2026-03-21-chat-layout-interaction-alignment.md deleted file mode 100644 index 23b48277f..000000000 --- a/docs/superpowers/plans/2026-03-21-chat-layout-interaction-alignment.md +++ /dev/null @@ -1,731 +0,0 @@ -# Chat Layout & Interaction Alignment Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Align the Chat page's layout structure and interaction patterns with the Skill page by refactoring the sidebar, upgrading mode cards, and redesigning the input area. - -**Architecture:** Extract duplicated sidebar JSX into a reusable `ConversationGroup` component, replace conditional DOM mount/unmount with `ResizablePanel` collapsible animation, upgrade mode card styling to match SkillCard, and compact the input area to match Skill Creator's gray-bg pill pattern. - -**Tech Stack:** Next.js, React, Tailwind CSS, react-resizable-panels, lucide-react, shadcn/ui AlertDialog - -**Spec:** `docs/superpowers/specs/2026-03-21-chat-layout-interaction-alignment-design.md` - -**Verification:** No test infrastructure exists in frontend. All verification is visual — run `npm run dev` from `frontend/` and check the Chat page in browser. 
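-
-Before the task breakdown, the collapsible-panel interaction at the heart of the refactor can be sketched up front. This is a minimal sketch, not the ChatLayout implementation: it assumes the shadcn/ui resizable wrappers around `react-resizable-panels`, and the component name and sizes are placeholders:
-
-```tsx
-// Sketch: ref-based collapse/expand keeps the sidebar mounted (and animatable)
-// instead of conditionally unmounting it. Names and sizes are placeholders.
-'use client'
-
-import { useRef } from 'react'
-import type { ImperativePanelHandle } from 'react-resizable-panels'
-
-import { ResizableHandle, ResizablePanel, ResizablePanelGroup } from '@/components/ui/resizable'
-
-export default function CollapsibleSidebarSketch() {
-  const sidebarRef = useRef<ImperativePanelHandle>(null)
-
-  const toggleSidebar = () => {
-    const panel = sidebarRef.current
-    if (!panel) return
-    if (panel.isCollapsed()) {
-      panel.expand()
-    } else {
-      panel.collapse()
-    }
-  }
-
-  return (
-    <ResizablePanelGroup direction="horizontal">
-      <ResizablePanel ref={sidebarRef} collapsible collapsedSize={4} minSize={15} defaultSize={20}>
-        {/* sidebar content stays mounted while collapsed */}
-      </ResizablePanel>
-      <ResizableHandle />
-      <ResizablePanel defaultSize={80}>
-        <button onClick={toggleSidebar}>Toggle sidebar</button>
-      </ResizablePanel>
-    </ResizablePanelGroup>
-  )
-}
-```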
- ---- - -## File Structure - -| File | Action | Responsibility | -|------|--------|---------------| -| `frontend/app/chat/components/ConversationGroup.tsx` | Create | `ConversationItem` + `ConversationGroup` — extracted sidebar row + group | -| `frontend/app/chat/components/ChatSidebar.tsx` | Modify | Use `ConversationGroup` instead of 3 duplicated blocks | -| `frontend/app/chat/hooks/useChatReducer.ts` | Modify | Add `SET_SIDEBAR_VISIBLE` action | -| `frontend/app/chat/ChatLayout.tsx` | Modify | Collapsible sidebar panel with ref-based toggle | -| `frontend/app/chat/components/ChatHome.tsx` | Modify | Mode card styling + input area compaction | -| `frontend/app/chat/components/ChatInput.tsx` | Modify | Input area compaction | -| `frontend/app/chat/conversation/ConversationPanel.tsx` | Modify | Input wrapper styling | - ---- - -### Task 1: Extract ConversationGroup Component (Area 1) - -**Files:** -- Create: `frontend/app/chat/components/ConversationGroup.tsx` -- Modify: `frontend/app/chat/components/ChatSidebar.tsx` - -- [ ] **Step 1: Create `ConversationGroup.tsx` with both components** - -Create `frontend/app/chat/components/ConversationGroup.tsx`: - -```tsx -'use client' - -import { MessageSquare, Trash2, ChevronDown, ChevronRight } from 'lucide-react' - -import { - AlertDialog, - AlertDialogAction, - AlertDialogCancel, - AlertDialogContent, - AlertDialogDescription, - AlertDialogFooter, - AlertDialogHeader, - AlertDialogTitle, - AlertDialogTrigger, -} from '@/components/ui/alert-dialog' -import { cn } from '@/lib/utils' -import { useTranslation } from '@/lib/i18n' - -interface Conversation { - thread_id: string - title: string - updated_at: string -} - -interface ConversationItemProps { - conv: Conversation - isActive: boolean - isCollapsed: boolean - onSelect: (threadId: string) => void - onDeleteClick: (e: React.MouseEvent, threadId: string, title: string) => void - formatTime: (date: string) => string - deleteConfirmOpen: boolean - conversationToDelete: { threadId: string; title: string } | null - onDeleteConfirmChange: (open: boolean) => void - onConfirmDelete: () => void - onCancelDelete: () => void -} - -function ConversationItem({ - conv, - isActive, - isCollapsed, - onSelect, - onDeleteClick, - formatTime, - deleteConfirmOpen, - conversationToDelete, - onDeleteConfirmChange, - onConfirmDelete, - onCancelDelete, -}: ConversationItemProps) { - const { t } = useTranslation() - - if (isCollapsed) { - return ( -
onSelect(conv.thread_id)} - title={conv.title || t('chat.untitled')} - > - -
- ) - } - - return ( -
onSelect(conv.thread_id)} - > - -
-

- {conv.title || t('chat.untitled')} -

-

{formatTime(conv.updated_at)}

-
- - - - - - - {t('chat.deleteConversation')} - - {t('chat.deleteConversationConfirm', { - title: conversationToDelete?.title || t('chat.untitled'), - })} - - - - {t('common.cancel')} - - {t('common.delete')} - - - - -
- ) -} - -interface ConversationGroupProps { - label: string - conversations: Conversation[] - isExpanded: boolean - onToggleExpand: () => void - isCollapsed: boolean - currentThreadId: string | null - onSelectConversation: (threadId: string) => void - onDeleteClick: (e: React.MouseEvent, threadId: string, title: string) => void - formatTime: (date: string) => string - maxItems?: number - deleteConfirmOpen: boolean - conversationToDelete: { threadId: string; title: string } | null - onDeleteConfirmChange: (open: boolean) => void - onConfirmDelete: () => void - onCancelDelete: () => void -} - -export default function ConversationGroup({ - label, - conversations, - isExpanded, - onToggleExpand, - isCollapsed, - currentThreadId, - onSelectConversation, - onDeleteClick, - formatTime, - maxItems, - deleteConfirmOpen, - conversationToDelete, - onDeleteConfirmChange, - onConfirmDelete, - onCancelDelete, -}: ConversationGroupProps) { - if (conversations.length === 0) return null - - const displayConversations = maxItems ? conversations.slice(0, maxItems) : conversations - - return ( -
- {!isCollapsed && ( - - )} - {(isExpanded || isCollapsed) && ( -
- {displayConversations.map((conv) => ( - - ))} -
- )} -
- ) -} -``` - -- [ ] **Step 2: Refactor ChatSidebar to use ConversationGroup** - -In `frontend/app/chat/components/ChatSidebar.tsx`: - -**2a.** Add import at top (after other imports): -```tsx -import ConversationGroup from './ConversationGroup' -``` - -**2b.** Replace the three state variables (lines 81-83): -```tsx -// Before: -const [isTodayCollapsed, setIsTodayCollapsed] = useState(false) -const [isThisMonthCollapsed, setIsThisMonthCollapsed] = useState(true) -const [isOlderCollapsed, setIsOlderCollapsed] = useState(true) - -// After: -const [isTodayExpanded, setIsTodayExpanded] = useState(true) -const [isThisMonthExpanded, setIsThisMonthExpanded] = useState(false) -const [isOlderExpanded, setIsOlderExpanded] = useState(false) -``` - -**2c.** Replace the entire conversation list section (the three groups of todayConvs, monthConvs, olderConvs — approximately lines 185-535) with: - -```tsx - setIsTodayExpanded(!isTodayExpanded)} - isCollapsed={isCollapsed} - currentThreadId={currentThreadId} - onSelectConversation={onSelectConversation} - onDeleteClick={handleDeleteConversation} - formatTime={formatTime} - deleteConfirmOpen={deleteConfirmOpen} - conversationToDelete={conversationToDelete} - onDeleteConfirmChange={setDeleteConfirmOpen} - onConfirmDelete={handleConfirmDelete} - onCancelDelete={handleCancelDelete} -/> - setIsThisMonthExpanded(!isThisMonthExpanded)} - isCollapsed={isCollapsed} - currentThreadId={currentThreadId} - onSelectConversation={onSelectConversation} - onDeleteClick={handleDeleteConversation} - formatTime={formatTime} - deleteConfirmOpen={deleteConfirmOpen} - conversationToDelete={conversationToDelete} - onDeleteConfirmChange={setDeleteConfirmOpen} - onConfirmDelete={handleConfirmDelete} - onCancelDelete={handleCancelDelete} -/> - setIsOlderExpanded(!isOlderExpanded)} - isCollapsed={isCollapsed} - currentThreadId={currentThreadId} - onSelectConversation={onSelectConversation} - onDeleteClick={handleDeleteConversation} - formatTime={formatTime} - maxItems={10} - deleteConfirmOpen={deleteConfirmOpen} - conversationToDelete={conversationToDelete} - onDeleteConfirmChange={setDeleteConfirmOpen} - onConfirmDelete={handleConfirmDelete} - onCancelDelete={handleCancelDelete} -/> -``` - -**2d.** Remove unused imports that were only used by the inlined conversation rows: `MessageSquare`, `Trash2`, `ChevronDown`, `ChevronRight` (if no longer referenced elsewhere in the file), and remove all `AlertDialog*` imports. - -**2e.** The existing `handleDeleteConversation` handler (line 101) is kept as-is. Add two new handlers for confirm/cancel if they don't exist: - -```tsx -const handleConfirmDelete = async () => { - if (conversationToDelete) { - await deleteConversation(conversationToDelete.threadId) - } - setDeleteConfirmOpen(false) - setConversationToDelete(null) -} - -const handleCancelDelete = () => { - setDeleteConfirmOpen(false) - setConversationToDelete(null) -} -``` - -Check the existing code — if `handleConfirmDelete` already exists with this logic inline in the AlertDialog, extract it. If `handleCancelDelete` logic is inline, extract it too. 
- -- [ ] **Step 3: Visual verification** - -Run: `cd frontend && npm run dev` - -Check: -- Sidebar shows three conversation groups (Today, This Month, Older) -- Clicking group headers expands/collapses them -- Today and This Month start expanded, Older starts collapsed -- Conversation rows show icon, title, time, delete button on hover -- Delete confirmation dialog works -- Active conversation is highlighted in blue - -- [ ] **Step 4: Commit** - -```bash -git add frontend/app/chat/components/ConversationGroup.tsx frontend/app/chat/components/ChatSidebar.tsx -git commit -m "refactor: extract ConversationGroup component from ChatSidebar to deduplicate conversation row JSX" -``` - ---- - -### Task 2: Sidebar Collapse Animation (Area 2) - -**Files:** -- Modify: `frontend/app/chat/hooks/useChatReducer.ts` -- Modify: `frontend/app/chat/ChatLayout.tsx` - -- [ ] **Step 1: Add `SET_SIDEBAR_VISIBLE` action to reducer** - -In `frontend/app/chat/hooks/useChatReducer.ts`: - -**1a.** Add to the `ChatAction` type union (near line 95 where other actions are defined): -```tsx -| { type: 'SET_SIDEBAR_VISIBLE'; visible: boolean } -``` - -**1b.** Add reducer case (near `TOGGLE_SIDEBAR` case around line 266): -```tsx -case 'SET_SIDEBAR_VISIBLE': - return { ...state, ui: { ...state.ui, sidebarVisible: action.visible } } -``` - -- [ ] **Step 2: Refactor ChatLayout sidebar to use collapsible panel** - -In `frontend/app/chat/ChatLayout.tsx`: - -**2a.** Update imports — add `ImperativePanelHandle` from `react-resizable-panels` and `useRef` is already imported: -```tsx -// Before (line 8): -import { ResizablePanelGroup, ResizablePanel, ResizableHandle } from '@/components/ui/resizable' - -// After: -import { ResizablePanelGroup, ResizablePanel, ResizableHandle } from '@/components/ui/resizable' -import type { ImperativePanelHandle } from 'react-resizable-panels' -``` - -**2b.** Add panel ref after other refs (around line 44): -```tsx -const sidebarPanelRef = useRef(null) -``` - -**2c.** Replace the Cmd+B shortcut handler (lines 48-57): -```tsx -// Before: -useEffect(() => { - const handleKeyDown = (e: KeyboardEvent) => { - if ((e.metaKey || e.ctrlKey) && e.key === 'b') { - e.preventDefault() - dispatch({ type: 'TOGGLE_SIDEBAR' }) - } - } - window.addEventListener('keydown', handleKeyDown) - return () => window.removeEventListener('keydown', handleKeyDown) -}, [dispatch]) - -// After: -useEffect(() => { - const handleKeyDown = (e: KeyboardEvent) => { - if ((e.metaKey || e.ctrlKey) && e.key === 'b') { - e.preventDefault() - if (state.ui.sidebarVisible) { - sidebarPanelRef.current?.collapse() - } else { - sidebarPanelRef.current?.expand() - } - } - } - window.addEventListener('keydown', handleKeyDown) - return () => window.removeEventListener('keydown', handleKeyDown) -}, [state.ui.sidebarVisible]) -``` - -**2d.** Replace the header toggle button onClick (line 258): -```tsx -// Before: -onClick={() => dispatch({ type: 'TOGGLE_SIDEBAR' })} - -// After: -onClick={() => { - if (state.ui.sidebarVisible) { - sidebarPanelRef.current?.collapse() - } else { - sidebarPanelRef.current?.expand() - } -}} -``` - -**2e.** Replace the sidebar panel section (lines 322-341): -```tsx -// Before: -{state.ui.sidebarVisible && ( - <> - - dispatch({ type: 'TOGGLE_SIDEBAR' })} - onSelectConversation={handleSelectConversation} - currentThreadId={state.threadId} - onNewChat={handleNewChat} - /> - - - -)} - -// After: - dispatch({ type: 'SET_SIDEBAR_VISIBLE', visible: false })} - onExpand={() => dispatch({ type: 'SET_SIDEBAR_VISIBLE', visible: 
true })} - className="overflow-hidden transition-all duration-300" -> - { - if (state.ui.sidebarVisible) { - sidebarPanelRef.current?.collapse() - } else { - sidebarPanelRef.current?.expand() - } - }} - onSelectConversation={handleSelectConversation} - currentThreadId={state.threadId} - onNewChat={handleNewChat} - /> - - -``` - -- [ ] **Step 3: Visual verification** - -Run: `cd frontend && npm run dev` - -Check: -- Sidebar toggle button collapses/expands the sidebar -- Cmd+B shortcut works -- Sidebar animates (or at least doesn't flash/unmount) -- When collapsed, sidebar takes zero width -- ResizableHandle remains visible as panel edge -- Sidebar isCollapsed prop now triggers icon-only mode in ChatSidebar (if collapsed while visible) - -- [ ] **Step 4: Commit** - -```bash -git add frontend/app/chat/hooks/useChatReducer.ts frontend/app/chat/ChatLayout.tsx -git commit -m "feat: replace sidebar mount/unmount with ResizablePanel collapsible animation" -``` - ---- - -### Task 3: Mode Card Beautification (Area 3) - -**Files:** -- Modify: `frontend/app/chat/components/ChatHome.tsx` - -- [ ] **Step 1: Update mode card container className** - -In `frontend/app/chat/components/ChatHome.tsx`, find the mode card `className` (around line 644-648): - -```tsx -// Before: -'group flex cursor-pointer items-start gap-4 rounded-xl border p-4 transition-all', -isSelected - ? 'border-blue-500 bg-blue-50 shadow-md ring-1 ring-blue-100' - : 'border-gray-200 bg-white hover:border-blue-300 hover:shadow-md', - -// After: -'group flex cursor-pointer items-start gap-4 overflow-hidden rounded-xl border bg-white p-4 transition-all duration-200', -isSelected - ? 'border-blue-500 bg-blue-50 shadow-md ring-1 ring-blue-100' - : 'border-gray-200 hover:border-blue-200 hover:shadow-lg', -``` - -- [ ] **Step 2: Update icon container className** - -Find the icon container inside each card (around line 651-654): - -```tsx -// Before: -
- -// After: -
-``` - -- [ ] **Step 3: Visual verification** - -Run: `cd frontend && npm run dev` - -Navigate to Chat home page. Check: -- Mode cards have smooth `duration-200` hover transition -- Hover shows `shadow-lg` and `border-blue-200` (softer than before) -- Icon containers are uniformly blue (`bg-blue-50` with `border-blue-100`) regardless of selected state -- Selected card still shows blue ring and blue background -- Cards have `overflow-hidden` - -- [ ] **Step 4: Commit** - -```bash -git add frontend/app/chat/components/ChatHome.tsx -git commit -m "style: upgrade mode card styling to match SkillCard design language" -``` - ---- - -### Task 4: Input Area Alignment (Area 4) - -**Files:** -- Modify: `frontend/app/chat/conversation/ConversationPanel.tsx` -- Modify: `frontend/app/chat/components/ChatInput.tsx` -- Modify: `frontend/app/chat/components/ChatHome.tsx` - -- [ ] **Step 1: Update ConversationPanel input wrapper** - -In `frontend/app/chat/conversation/ConversationPanel.tsx` line 76: - -```tsx -// Before: -
- -// After: -
-``` - -- [ ] **Step 2: Update ChatInput container** - -In `frontend/app/chat/components/ChatInput.tsx` line 134: - -```tsx -// Before: -'flex items-end gap-3 rounded-[24px] border border-gray-200 bg-white p-4 shadow-md transition-all', - -// After: -'flex items-end gap-2 rounded-2xl border border-gray-200 bg-gray-50 px-4 py-3 transition-all', -``` - -- [ ] **Step 3: Update ChatInput textarea** - -In `frontend/app/chat/components/ChatInput.tsx` line 157: - -```tsx -// Before: -className="max-h-[200px] min-h-[100px] flex-1 resize-none overflow-y-auto border-none bg-transparent px-0.5 pb-6 pt-4 text-base shadow-none placeholder:text-gray-400 focus:outline-none focus-visible:ring-0" - -// After: -className="max-h-[160px] min-h-[24px] flex-1 resize-none overflow-y-auto border-none bg-transparent text-sm shadow-none placeholder:text-gray-400 focus:outline-none focus-visible:ring-0" -``` - -- [ ] **Step 4: Update ChatInput buttons** - -In `frontend/app/chat/components/ChatInput.tsx`: - -Attach button (line 169): change `h-10 w-10 rounded-2xl` to `h-8 w-8 rounded-xl` -Stop button (line 180): change `h-10 w-10` to `h-8 w-8` -Send button (line 191): change `h-10 w-10` to `h-8 w-8` - -- [ ] **Step 5: Update ChatInput auto-resize cap** - -In `frontend/app/chat/components/ChatInput.tsx` line 58: - -```tsx -// Before: -textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 200)}px` - -// After: -textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 160)}px` -``` - -- [ ] **Step 6: Update ChatHome input outer container** - -In `frontend/app/chat/components/ChatHome.tsx` around line 419: - -```tsx -// Before: -
- -// After: -
-``` - -- [ ] **Step 7: Update ChatHome input inner container** - -In `frontend/app/chat/components/ChatHome.tsx` around line 420: - -```tsx -// Before: -
- -// After: -
-``` - -- [ ] **Step 8: Update ChatHome textarea** - -In `frontend/app/chat/components/ChatHome.tsx` around line 480: - -```tsx -// Before: -className="max-h-[240px] min-h-[120px] w-full resize-none overflow-y-auto border-none bg-transparent px-1 pb-14 pt-5 text-base shadow-none transition-all duration-200 placeholder:text-gray-400 focus:outline-none focus-visible:ring-0" - -// After: -className="max-h-[160px] min-h-[24px] w-full resize-none overflow-y-auto border-none bg-transparent text-sm shadow-none placeholder:text-gray-400 focus:outline-none focus-visible:ring-0" -``` - -- [ ] **Step 9: Update ChatHome bottom buttons position** - -In `frontend/app/chat/components/ChatHome.tsx` around line 484, the absolutely-positioned buttons container: - -```tsx -// Before: -absolute bottom-2 left-1 - -// After: -absolute bottom-1 left-1 -``` - -- [ ] **Step 10: Update ChatHome buttons size** - -In `frontend/app/chat/components/ChatHome.tsx` (lines 552-609), change all button sizes: - -All `h-10 w-10` → `h-8 w-8` (attach, stop, send buttons) - -- [ ] **Step 11: Update ChatHome auto-resize cap** - -In `frontend/app/chat/components/ChatHome.tsx` around line 137: - -```tsx -// Before: -textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 200)}px` - -// After: -textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 160)}px` -``` - -- [ ] **Step 12: Visual verification** - -Run: `cd frontend && npm run dev` - -Check Chat home page: -- Input area uses gray background (`bg-gray-50`) -- Input is compact — no oversized textarea -- Buttons are smaller (`h-8 w-8`) -- No bottom overflow on small viewports -- Text is `text-sm` (not `text-base`) -- Bottom buttons don't overlap text content - -Check Chat conversation page: -- Input wrapper has subtle `border-t` instead of heavy shadow -- Input container matches home page styling -- Textarea starts small and grows with content up to 160px max - -- [ ] **Step 13: Commit** - -```bash -git add frontend/app/chat/conversation/ConversationPanel.tsx frontend/app/chat/components/ChatInput.tsx frontend/app/chat/components/ChatHome.tsx -git commit -m "style: compact input area to match Skill Creator pattern" -``` diff --git a/docs/superpowers/plans/2026-03-21-chat-ui-beautification.md b/docs/superpowers/plans/2026-03-21-chat-ui-beautification.md deleted file mode 100644 index ab6023c90..000000000 --- a/docs/superpowers/plans/2026-03-21-chat-ui-beautification.md +++ /dev/null @@ -1,622 +0,0 @@ -# Chat UI Beautification Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Align the Chat page's visual quality with the Skill page by establishing a consistent blue brand color, improving surface depth, upgrading message bubbles, sidebar, header, empty/loading states, and standardizing animations. - -**Architecture:** Pure CSS/Tailwind class changes across 8 existing files. No new components, no state changes, no business logic changes. One new import (`getModeConfig`) in ChatLayout for header title derivation. - -**Tech Stack:** Next.js App Router, Tailwind CSS, shadcn/ui ` - -// TO: - -``` - -- [ ] **Step 5: Add center title** - -Insert after the new chat button's closing `` (after line ~280), before the preview toggle conditional block. 
Use `useChatState()` (already available) and `useTranslation()` (already imported): - -```tsx -
- - {t(getModeConfig(state.mode.currentMode)?.labelKey || 'chat.modes.default-chat')} - -
-``` - -Note: `state.mode.currentMode` is the correct field (ChatState has no `currentTitle` field). The fallback key `chat.modes.default-chat` should already exist in the i18n files as it's used by modeConfig. If it doesn't exist, use a `defaultValue`: `t(... || 'chat.modes.default-chat', { defaultValue: 'Chat' })`. - -- [ ] **Step 6: Update preview toggle hover** - -Around line 294, the preview toggle button currently has `className="h-9 w-9 p-0 transition-colors hover:bg-gray-100"`. Add `rounded-lg`: - -```tsx -// FROM: -className="h-9 w-9 p-0 transition-colors hover:bg-gray-100" - -// TO: -className="h-9 w-9 rounded-lg p-0 transition-colors hover:bg-gray-100" -``` - -- [ ] **Step 7: Verify** - -Check: Header should be taller (h-14), white bg with bottom border. New chat button is blue circle. Center shows mode name. Sidebar/preview toggles have consistent hover. - -- [ ] **Step 8: Commit** - -```bash -git add frontend/app/chat/ChatLayout.tsx -git commit -m "style: redesign header - white bg, branded new-chat button, center title" -``` - ---- - -### Task 5: ConversationPanel & ChatInput — Surface Depth & Brand Color - -**Files:** -- Modify: `frontend/app/chat/conversation/ConversationPanel.tsx` -- Modify: `frontend/app/chat/components/ChatInput.tsx` - -**Covers:** Area 1 (brand color), Area 5 (surface depth) - -- [ ] **Step 1: Update input wrapper in ConversationPanel** - -In `ConversationPanel.tsx` around line 76: - -```tsx -// FROM: -
- -// TO: -
-``` - -- [ ] **Step 2: Upgrade ChatInput container shadow** - -In `ChatInput.tsx` around line 134: - -```tsx -// FROM: -'flex items-end gap-3 rounded-[24px] border border-gray-200 bg-white p-4 shadow-sm transition-all', - -// TO: -'flex items-end gap-3 rounded-[24px] border border-gray-200 bg-white p-4 shadow-md transition-all', -``` - -- [ ] **Step 3: Rebrand ChatInput send button** - -In `ChatInput.tsx` around lines 192-193: - -```tsx -// FROM: -canSubmit && !isProcessing && !isUploading - ? 'bg-gray-900 hover:bg-gray-800' - : 'cursor-not-allowed bg-gray-100', - -// TO: -canSubmit && !isProcessing && !isUploading - ? 'bg-blue-600 hover:bg-blue-700' - : 'cursor-not-allowed bg-gray-100', -``` - -- [ ] **Step 4: Verify** - -Check: Input area has upward soft shadow instead of hard border-top line. Input container has slightly stronger shadow. Send button is blue when active. - -- [ ] **Step 5: Commit** - -```bash -git add frontend/app/chat/conversation/ConversationPanel.tsx frontend/app/chat/components/ChatInput.tsx -git commit -m "style: upgrade input area surface depth and brand send button" -``` - ---- - -### Task 6: ChatHome — Send Button & Mode Card - -**Files:** -- Modify: `frontend/app/chat/components/ChatHome.tsx` - -**Covers:** Area 1 (brand color) - -- [ ] **Step 1: Rebrand ChatHome send button** - -Around lines 591-592: - -```tsx -// FROM: -state.input.trim() && !isProcessing && !state.isRedirecting - ? 'bg-gray-900 hover:bg-gray-800' - : 'cursor-not-allowed bg-gray-100', - -// TO: -state.input.trim() && !isProcessing && !state.isRedirecting - ? 'bg-blue-600 hover:bg-blue-700' - : 'cursor-not-allowed bg-gray-100', -``` - -- [ ] **Step 2: Add ring to selected mode card** - -Around lines 646-647: - -```tsx -// FROM: -isSelected - ? 'border-blue-500 bg-blue-50 shadow-md' - : 'border-gray-200 bg-white hover:border-blue-300 hover:shadow-md', - -// TO: -isSelected - ? 'border-blue-500 bg-blue-50 shadow-md ring-1 ring-blue-100' - : 'border-gray-200 bg-white hover:border-blue-300 hover:shadow-md', -``` - -- [ ] **Step 3: Verify** - -Check: ChatHome send button is blue. Selected mode card has subtle blue ring. - -- [ ] **Step 4: Commit** - -```bash -git add frontend/app/chat/components/ChatHome.tsx -git commit -m "style: brand ChatHome send button and mode card ring" -``` - ---- - -### Task 7: PreviewPanel — Tab Animation - -**Files:** -- Modify: `frontend/app/chat/preview/PreviewPanel.tsx` - -**Covers:** Area 5 (surface depth — already bg-white, no change), Area 7 (animation) - -- [ ] **Step 1: Add transition to Files tab button** - -Around lines 28-31, add `transition-all duration-200` to the className: - -```tsx -// FROM: -className={`flex items-center gap-1.5 rounded-md px-2.5 py-1.5 text-sm ${ - activeTab === 'files' - ? 'bg-gray-100 font-medium' - : 'text-gray-500 hover:text-gray-700' -}`} - -// TO: -className={`flex items-center gap-1.5 rounded-md px-2.5 py-1.5 text-sm transition-all duration-200 ${ - activeTab === 'files' - ? 'bg-gray-100 font-medium' - : 'text-gray-500 hover:text-gray-700' -}`} -``` - -- [ ] **Step 2: Add transition to Tool tab button** - -Around lines 41-44, same change: - -```tsx -// FROM: -className={`flex items-center gap-1.5 rounded-md px-2.5 py-1.5 text-sm ${ - activeTab === 'tool' - ? 'bg-gray-100 font-medium' - : 'text-gray-500 hover:text-gray-700' -}`} - -// TO: -className={`flex items-center gap-1.5 rounded-md px-2.5 py-1.5 text-sm transition-all duration-200 ${ - activeTab === 'tool' - ? 
'bg-gray-100 font-medium'
-    : 'text-gray-500 hover:text-gray-700'
-}`}
-```
-
-- [ ] **Step 3: Verify**
-
-Check: Switching tabs in PreviewPanel should animate smoothly.
-
-- [ ] **Step 4: Commit**
-
-```bash
-git add frontend/app/chat/preview/PreviewPanel.tsx
-git commit -m "style: add transition animation to PreviewPanel tabs"
-```
-
----
-
-## Final Verification
-
-- [ ] Run full dev server and test all 7 areas visually
-- [ ] Verify no TypeScript errors: `cd frontend && npx tsc --noEmit`
-- [ ] Verify no lint errors: `cd frontend && npm run lint`
-- [ ] Final commit if any adjustments needed
diff --git a/docs/superpowers/plans/2026-03-23-skill-frontend-versioning-collab-token.md b/docs/superpowers/plans/2026-03-23-skill-frontend-versioning-collab-token.md
deleted file mode 100644
index 90764c083..000000000
--- a/docs/superpowers/plans/2026-03-23-skill-frontend-versioning-collab-token.md
+++ /dev/null
@@ -1,2093 +0,0 @@
-# Skill Frontend: Version Management + Collaborator Management + Token Management Implementation Plan
-
-> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
-
-**Goal:** Implement the complete frontend UI for skill versioning, collaborator management, and platform token management.
-
-**Architecture:** Three service files (one per API domain) expose async methods calling `apiGet/apiPost/apiPut/apiDelete`. Three React Query hook files provide `useQuery`/`useMutation` wrappers with key factories. UI components integrate into SkillsManager (version + collaborator tabs) and Settings dialog (token page). All strings use i18n.
-
-**Tech Stack:** Next.js + React 19, TypeScript, React Query v5, shadcn/ui (Radix), Tailwind CSS, react-hook-form + Zod, lucide-react icons.
-
-**Spec:** `docs/superpowers/specs/2026-03-23-skill-frontend-versioning-collab-token-design.md`
-
----
-
-## Task 1: Backend — Extend VersionSummarySchema with `published_by_id`
-
-**Files:**
-- Modify: `backend/app/schemas/skill_version.py` (VersionSummarySchema class, ~line 78)
-
-- [ ] **Step 1: Add `published_by_id` to VersionSummarySchema**
-
-```python
-class VersionSummarySchema(BaseModel):
-    """Lightweight version info for list endpoints."""
-
-    version: str
-    release_notes: Optional[str] = None
-    published_by_id: str
-    published_at: Optional[str] = None
-
-    @field_validator("published_at", mode="before")
-    @classmethod
-    def convert_datetime_to_str(cls, v):
-        if isinstance(v, datetime):
-            return v.isoformat()
-        return v
-
-    class Config:
-        from_attributes = True
-```
-
-- [ ] **Step 2: Verify existing tests still pass**
-
-Run: `cd backend && python -m pytest tests/ -x -q --timeout=30 2>/dev/null || echo "no tests or pass"`
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add backend/app/schemas/skill_version.py
-git commit -m "feat(schema): add published_by_id to VersionSummarySchema for frontend display"
-```
-
----
-
-## Task 2: Service — `skillVersionService.ts`
-
-**Files:**
-- Create: `frontend/services/skillVersionService.ts`
-
-- [ ] **Step 1: Create the service file**
-
-```typescript
-import { apiGet, apiPost, apiDelete } from '@/lib/api-client'
-
-// ---------- Types ----------
-
-export interface SkillVersionSummary {
-  version: string
-  releaseNotes: string | null
-  publishedById: string
-  publishedAt: string | null
-}
-
-export interface SkillVersionFile {
-  id: string
-  versionId: string
-  path: string
-  fileName: string
-  fileType: string
-  content: string | null
-  storageType: string
-  storageKey: string | null
-  size: number
-}
-
-export interface SkillVersion {
-  id: string
-  skillId: string
-  version: string
-  releaseNotes: string | null
-  skillName: string
-  skillDescription: string
-  content: string
-  tags: string[]
-  metadata: Record<string, any>
-  allowedTools: string[]
-  compatibility: string | null
-  license: string | null
-  publishedById: string
-  publishedAt: string | null
-  createdAt: string | null
-  files: SkillVersionFile[] | null
-}
-
-// ---------- Normalizers ----------
-
-function normalizeVersionSummary(raw: any): SkillVersionSummary {
-  return {
-    version: raw.version,
-    releaseNotes: raw.release_notes ?? null,
-    publishedById: raw.published_by_id,
-    publishedAt: raw.published_at ?? null,
-  }
-}
-
-function normalizeVersion(raw: any): SkillVersion {
-  return {
-    id: raw.id,
-    skillId: raw.skill_id,
-    version: raw.version,
-    releaseNotes: raw.release_notes ?? null,
-    skillName: raw.skill_name,
-    skillDescription: raw.skill_description,
-    content: raw.content,
-    tags: raw.tags ?? [],
-    metadata: raw.metadata ?? {},
-    allowedTools: raw.allowed_tools ?? [],
-    compatibility: raw.compatibility ?? null,
-    license: raw.license ?? null,
-    publishedById: raw.published_by_id,
-    publishedAt: raw.published_at ?? null,
-    createdAt: raw.created_at ?? null,
-    files: raw.files?.map((f: any) => ({
-      id: f.id,
-      versionId: f.version_id,
-      path: f.path,
-      fileName: f.file_name,
-      fileType: f.file_type,
-      content: f.content ?? null,
-      storageType: f.storage_type,
-      storageKey: f.storage_key ?? null,
-      size: f.size ?? 0,
-    })) ?? null,
-  }
-}
-
-// ---------- Service ----------
-
-export const skillVersionService = {
-  async listVersions(skillId: string): Promise<SkillVersionSummary[]> {
-    const data = await apiGet(`skills/${skillId}/versions`)
-    return (Array.isArray(data) ? data : []).map(normalizeVersionSummary)
-  },
-
-  async getVersion(skillId: string, version: string): Promise<SkillVersion> {
-    const data = await apiGet(`skills/${skillId}/versions/${version}`)
-    return normalizeVersion(data)
-  },
-
-  async getLatestVersion(skillId: string): Promise<SkillVersion> {
-    const data = await apiGet(`skills/${skillId}/versions/latest`)
-    return normalizeVersion(data)
-  },
-
-  async publishVersion(skillId: string, payload: { version: string; release_notes?: string }): Promise<SkillVersion> {
-    const data = await apiPost(`skills/${skillId}/versions`, payload)
-    return normalizeVersion(data)
-  },
-
-  async deleteVersion(skillId: string, version: string): Promise<void> {
-    await apiDelete(`skills/${skillId}/versions/${version}`)
-  },
-
-  async restoreDraft(skillId: string, payload: { version: string }): Promise<any> {
-    return await apiPost(`skills/${skillId}/restore`, payload)
-  },
-}
-```
-
-- [ ] **Step 2: Commit**
-
-```bash
-git add frontend/services/skillVersionService.ts
-git commit -m "feat: add skillVersionService with all version API methods"
-```
-
----
-
-## Task 3: Service — `skillCollaboratorService.ts`
-
-**Files:**
-- Create: `frontend/services/skillCollaboratorService.ts`
-
-- [ ] **Step 1: Create the service file**
-
-```typescript
-import { apiGet, apiPost, apiPut, apiDelete } from '@/lib/api-client'
-
-// ---------- Types ----------
-
-export type CollaboratorRole = 'viewer' | 'editor' | 'publisher' | 'admin'
-
-export interface SkillCollaborator {
-  id: string
-  skillId: string
-  userId: string
-  role: CollaboratorRole
-  invitedBy: string
-  createdAt: string | null
-}
-
-// ---------- Normalizer ----------
-
-function normalizeCollaborator(raw: any): SkillCollaborator {
-  return {
-    id: raw.id,
-    skillId: raw.skill_id,
-    userId: raw.user_id,
-    role: raw.role,
-    invitedBy: raw.invited_by,
-    createdAt: raw.created_at ?? null,
-  }
-}
-
-// ---------- Service ----------
-
-export const skillCollaboratorService = {
-  async listCollaborators(skillId: string): Promise<SkillCollaborator[]> {
-    const data = await apiGet(`skills/${skillId}/collaborators`)
-    return (Array.isArray(data) ? data : []).map(normalizeCollaborator)
-  },
-
-  async addCollaborator(skillId: string, payload: { user_id: string; role: CollaboratorRole }): Promise<SkillCollaborator> {
-    const data = await apiPost(`skills/${skillId}/collaborators`, payload)
-    return normalizeCollaborator(data)
-  },
-
-  async updateRole(skillId: string, userId: string, payload: { role: CollaboratorRole }): Promise<SkillCollaborator> {
-    const data = await apiPut(`skills/${skillId}/collaborators/${userId}`, payload)
-    return normalizeCollaborator(data)
-  },
-
-  async removeCollaborator(skillId: string, userId: string): Promise<void> {
-    await apiDelete(`skills/${skillId}/collaborators/${userId}`)
-  },
-
-  async transferOwnership(skillId: string, payload: { new_owner_id: string }): Promise<void> {
-    await apiPost(`skills/${skillId}/transfer`, payload)
-  },
-}
-```
-
-- [ ] **Step 2: Commit**
-
-```bash
-git add frontend/services/skillCollaboratorService.ts
-git commit -m "feat: add skillCollaboratorService with collaborator API methods"
-```
-
----
-
-## Task 4: Service — `platformTokenService.ts`
-
-**Files:**
-- Create: `frontend/services/platformTokenService.ts`
-
-- [ ] **Step 1: Create the service file**
-
-```typescript
-import { apiGet, apiPost, apiDelete } from '@/lib/api-client'
-
-// ---------- Types ----------
-
-export interface PlatformToken {
-  id: string
-  name: string
-  tokenPrefix: string
-  scopes: string[]
-  resourceType: string | null
-  resourceId: string | null
-  expiresAt: string | null
-  lastUsedAt: string | null
-  isActive: boolean
-  createdAt: string | null
-}
-
-export interface PlatformTokenCreateResponse {
-  id: string
-  name: string
-  token: string
-  tokenPrefix: string
-  scopes: string[]
-  expiresAt: string | null
-}
-
-export interface TokenCreateRequest {
-  name: string
-  scopes: string[]
-  expires_at?: string | null
-}
-
-// ---------- Normalizers ----------
-
-function normalizeToken(raw: any): PlatformToken {
-  return {
-    id: raw.id,
-    name: raw.name,
-    tokenPrefix: raw.token_prefix,
-    scopes: raw.scopes ?? [],
-    resourceType: raw.resource_type ?? null,
-    resourceId: raw.resource_id ?? null,
-    expiresAt: raw.expires_at ?? null,
-    lastUsedAt: raw.last_used_at ?? null,
-    isActive: raw.is_active ?? true,
-    createdAt: raw.created_at ?? null,
-  }
-}
-
-function normalizeTokenCreateResponse(raw: any): PlatformTokenCreateResponse {
-  return {
-    id: raw.id,
-    name: raw.name,
-    token: raw.token,
-    tokenPrefix: raw.token_prefix,
-    scopes: raw.scopes ?? [],
-    expiresAt: raw.expires_at ?? null,
-  }
-}
-
-// ---------- Service ----------
-
-export const platformTokenService = {
-  async listTokens(): Promise<PlatformToken[]> {
-    const data = await apiGet('tokens')
-    return (Array.isArray(data) ? data : []).map(normalizeToken)
-  },
-
-  async createToken(payload: TokenCreateRequest): Promise<PlatformTokenCreateResponse> {
-    const data = await apiPost('tokens', payload)
-    return normalizeTokenCreateResponse(data)
-  },
-
-  async revokeToken(tokenId: string): Promise<void> {
-    await apiDelete(`tokens/${tokenId}`)
-  },
-}
-```
-
-- [ ] **Step 2: Commit**
-
-```bash
-git add frontend/services/platformTokenService.ts
-git commit -m "feat: add platformTokenService with token CRUD methods"
-```
-
----
-
-## Task 5: React Query Hooks — `skillVersions.ts`
-
-**Files:**
-- Create: `frontend/hooks/queries/skillVersions.ts`
-- Modify: `frontend/hooks/queries/index.ts` (add export)
-
-- [ ] **Step 1: Create the query hooks file**
-
-```typescript
-import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
-
-import { STALE_TIME } from './constants'
-import { skillKeys } from './skills'
-import { skillVersionService } from '@/services/skillVersionService'
-import type { SkillVersionSummary, SkillVersion } from '@/services/skillVersionService'
-
-export { type SkillVersionSummary, type SkillVersion } from '@/services/skillVersionService'
-
-// ---------- Key factory ----------
-
-export const skillVersionKeys = {
-  all: ['skill-versions'] as const,
-  list: (skillId: string) => [...skillVersionKeys.all, 'list', skillId] as const,
-  detail: (skillId: string, version: string) =>
-    [...skillVersionKeys.all, 'detail', skillId, version] as const,
-  latest: (skillId: string) => [...skillVersionKeys.all, 'latest', skillId] as const,
-}
-
-// ---------- Queries ----------
-
-export function useSkillVersions(skillId: string) {
-  return useQuery({
-    queryKey: skillVersionKeys.list(skillId),
-    queryFn: () => skillVersionService.listVersions(skillId),
-    enabled: !!skillId,
-    retry: false,
-    staleTime: STALE_TIME.STANDARD,
-  })
-}
-
-export function useSkillVersion(skillId: string, version: string) {
-  return useQuery({
-    queryKey: skillVersionKeys.detail(skillId, version),
-    queryFn: () => skillVersionService.getVersion(skillId, version),
-    enabled: !!skillId && !!version,
-    retry: false,
-    staleTime: STALE_TIME.STANDARD,
-  })
-}
-
-// ---------- Mutations ----------
-
-export function usePublishVersion(skillId: string) {
-  const queryClient = useQueryClient()
-  return useMutation({
-    mutationFn: (payload: { version: string; release_notes?: string }) =>
-      skillVersionService.publishVersion(skillId, payload),
-    onSuccess: () => {
-      queryClient.invalidateQueries({ queryKey: skillVersionKeys.list(skillId) })
-      queryClient.invalidateQueries({ queryKey: skillKeys.all })
-    },
-  })
-}
-
-export function useDeleteVersion(skillId: string) {
-  const queryClient = useQueryClient()
-  return useMutation({
-    mutationFn: (version: string) => skillVersionService.deleteVersion(skillId, version),
-    onSuccess: () => {
-      queryClient.invalidateQueries({ queryKey: skillVersionKeys.list(skillId) })
-    },
-  })
-}
-
-export function useRestoreDraft(skillId: string) {
-  const queryClient = useQueryClient()
-  return useMutation({
-    mutationFn: (version: string) =>
-      skillVersionService.restoreDraft(skillId, { version }),
-    onSuccess: () => {
-      queryClient.invalidateQueries({ queryKey: skillKeys.all })
-    },
-  })
-}
-```
-
-- [ ] **Step 2: Add export to index.ts**
-
-In `frontend/hooks/queries/index.ts`, add:
-
-```typescript
-export * from './skillVersions'
-```
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add frontend/hooks/queries/skillVersions.ts frontend/hooks/queries/index.ts
-git commit -m "feat: add React Query hooks for skill versions"
-```
-
----
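-
-For orientation before the remaining hook files, a hypothetical consumer of the Task 5 hooks; the component is illustrative and not one of the files this plan creates:
-
-```tsx
-// Usage sketch only; relies on the hooks defined in Task 5.
-'use client'
-
-import { useSkillVersions, usePublishVersion } from '@/hooks/queries/skillVersions'
-
-export function VersionListSketch({ skillId }: { skillId: string }) {
-  const { data: versions = [], isLoading } = useSkillVersions(skillId)
-  const publish = usePublishVersion(skillId)
-
-  if (isLoading) return <p>Loading…</p>
-
-  return (
-    <div>
-      <button
-        disabled={publish.isPending}
-        onClick={() => publish.mutate({ version: '1.0.0', release_notes: 'Initial release' })}
-      >
-        Publish 1.0.0
-      </button>
-      <ul>
-        {versions.map((v) => (
-          <li key={v.version}>
-            {v.version}: {v.releaseNotes ?? 'no release notes'}
-          </li>
-        ))}
-      </ul>
-    </div>
-  )
-}
-```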
- -## Task 6: React Query Hooks — `skillCollaborators.ts` - -**Files:** -- Create: `frontend/hooks/queries/skillCollaborators.ts` -- Modify: `frontend/hooks/queries/index.ts` (add export) - -- [ ] **Step 1: Create the query hooks file** - -```typescript -import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query' - -import { STALE_TIME } from './constants' -import { skillKeys } from './skills' -import { skillCollaboratorService } from '@/services/skillCollaboratorService' -import type { SkillCollaborator, CollaboratorRole } from '@/services/skillCollaboratorService' - -export { type SkillCollaborator, type CollaboratorRole } from '@/services/skillCollaboratorService' - -// ---------- Key factory ---------- - -export const skillCollaboratorKeys = { - all: ['skill-collaborators'] as const, - list: (skillId: string) => [...skillCollaboratorKeys.all, 'list', skillId] as const, -} - -// ---------- Queries ---------- - -export function useSkillCollaborators(skillId: string) { - return useQuery({ - queryKey: skillCollaboratorKeys.list(skillId), - queryFn: () => skillCollaboratorService.listCollaborators(skillId), - enabled: !!skillId, - retry: false, - staleTime: STALE_TIME.STANDARD, - }) -} - -// ---------- Mutations ---------- - -export function useAddCollaborator(skillId: string) { - const queryClient = useQueryClient() - return useMutation({ - mutationFn: (payload: { user_id: string; role: CollaboratorRole }) => - skillCollaboratorService.addCollaborator(skillId, payload), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: skillCollaboratorKeys.list(skillId) }) - }, - }) -} - -export function useUpdateCollaboratorRole(skillId: string) { - const queryClient = useQueryClient() - return useMutation({ - mutationFn: ({ userId, role }: { userId: string; role: CollaboratorRole }) => - skillCollaboratorService.updateRole(skillId, userId, { role }), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: skillCollaboratorKeys.list(skillId) }) - }, - }) -} - -export function useRemoveCollaborator(skillId: string) { - const queryClient = useQueryClient() - return useMutation({ - mutationFn: (userId: string) => - skillCollaboratorService.removeCollaborator(skillId, userId), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: skillCollaboratorKeys.list(skillId) }) - }, - }) -} - -export function useTransferOwnership(skillId: string) { - const queryClient = useQueryClient() - return useMutation({ - mutationFn: (newOwnerId: string) => - skillCollaboratorService.transferOwnership(skillId, { new_owner_id: newOwnerId }), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: skillCollaboratorKeys.list(skillId) }) - queryClient.invalidateQueries({ queryKey: skillKeys.all }) - }, - }) -} -``` - -- [ ] **Step 2: Add export to index.ts** - -In `frontend/hooks/queries/index.ts`, add: - -```typescript -export * from './skillCollaborators' -``` - -- [ ] **Step 3: Commit** - -```bash -git add frontend/hooks/queries/skillCollaborators.ts frontend/hooks/queries/index.ts -git commit -m "feat: add React Query hooks for skill collaborators" -``` - ---- - -## Task 7: React Query Hooks — `platformTokens.ts` - -**Files:** -- Create: `frontend/hooks/queries/platformTokens.ts` -- Modify: `frontend/hooks/queries/index.ts` (add export) - -- [ ] **Step 1: Create the query hooks file** - -```typescript -import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query' - -import { STALE_TIME } from './constants' -import { platformTokenService } from 
'@/services/platformTokenService' -import type { PlatformToken, PlatformTokenCreateResponse, TokenCreateRequest } from '@/services/platformTokenService' - -export { type PlatformToken, type PlatformTokenCreateResponse, type TokenCreateRequest } from '@/services/platformTokenService' - -// ---------- Key factory ---------- - -export const platformTokenKeys = { - all: ['platform-tokens'] as const, - list: () => [...platformTokenKeys.all, 'list'] as const, -} - -// ---------- Queries ---------- - -export function usePlatformTokens() { - return useQuery({ - queryKey: platformTokenKeys.list(), - queryFn: () => platformTokenService.listTokens(), - retry: false, - staleTime: STALE_TIME.LONG, - }) -} - -// ---------- Mutations ---------- - -export function useCreateToken() { - const queryClient = useQueryClient() - return useMutation({ - mutationFn: (payload: TokenCreateRequest) => - platformTokenService.createToken(payload), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: platformTokenKeys.all }) - }, - }) -} - -export function useRevokeToken() { - const queryClient = useQueryClient() - return useMutation({ - mutationFn: (tokenId: string) => platformTokenService.revokeToken(tokenId), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: platformTokenKeys.all }) - }, - }) -} -``` - -- [ ] **Step 2: Add export to index.ts** - -In `frontend/hooks/queries/index.ts`, add: - -```typescript -export * from './platformTokens' -``` - -- [ ] **Step 3: Commit** - -```bash -git add frontend/hooks/queries/platformTokens.ts frontend/hooks/queries/index.ts -git commit -m "feat: add React Query hooks for platform tokens" -``` - ---- - -## Task 8: i18n — Add translation keys for all three features - -**Files:** -- Modify: `frontend/lib/i18n/locales/en.ts` -- Modify: `frontend/lib/i18n/locales/zh.ts` - -- [ ] **Step 1: Add English translation keys** - -Add `editor` key to the `skills` block (if not already present): - -```typescript -editor: 'Editor', -``` - -Add inside the `settings` block, after the `sandboxes` key: - -```typescript -tokens: { - title: 'API Tokens', - description: 'Manage your API access tokens', - create: 'Create Token', - revoke: 'Revoke', - revokeConfirmTitle: 'Revoke Token', - revokeConfirmMessage: 'API calls using this token will fail immediately after revoking.', - name: 'Token Name', - namePlaceholder: 'e.g. CI deploy token', - scopes: 'Scopes', - expiresAt: 'Expiration', - noExpiry: 'No expiry', - lastUsed: 'Last used', - neverUsed: 'never', - createdSuccess: 'Token created successfully', - revokedSuccess: 'Token revoked', - tokenCreatedTitle: 'Token Created', - tokenCreatedMessage: 'Copy this token now. You won\'t be able to see it again.', - copyToken: 'Copy Token', - copied: 'Copied!', - emptyState: 'No API tokens created yet', - limitReached: 'Token limit reached (50)', - justNow: 'just now', - minutesAgo: '{{count}}m ago', - hoursAgo: '{{count}}h ago', - daysAgo: '{{count}}d ago', -}, -``` - -Add a new top-level `skillVersions` block: - -```typescript -skillVersions: { - title: 'Version History', - publish: 'Publish New Version', - publishButton: 'Publish', - versionNumber: 'Version Number', - versionPlaceholder: 'e.g. 1.0.0', - releaseNotes: 'Release Notes', - releaseNotesPlaceholder: 'Describe what changed...', - restore: 'Restore', - delete: 'Delete', - restoreConfirmTitle: 'Restore Draft', - restoreConfirmMessage: 'This will overwrite the current draft with version {{version}}. 
Continue?', - deleteConfirmTitle: 'Delete Version', - deleteConfirmMessage: 'This version will be permanently deleted. This cannot be undone.', - publishedBy: 'by {{user}}', - publishedSuccess: 'Version {{version}} published', - restoredSuccess: 'Draft restored from version {{version}}', - deletedSuccess: 'Version {{version}} deleted', - emptyState: 'No versions published yet', - invalidVersion: 'Must be MAJOR.MINOR.PATCH format (e.g. 1.0.0)', -}, -``` - -Add a new top-level `skillCollaborators` block: - -```typescript -skillCollaborators: { - title: 'Collaborators', - add: 'Add Collaborator', - userId: 'User ID', - userIdPlaceholder: 'Enter user ID', - role: 'Role', - owner: 'owner', - viewer: 'viewer', - editor: 'editor', - publisher: 'publisher', - admin: 'admin', - remove: 'Remove', - removeConfirmTitle: 'Remove Collaborator', - removeConfirmMessage: 'This user will lose access to this skill.', - transferOwnership: 'Transfer Ownership', - transferConfirmTitle: 'Transfer Ownership', - transferConfirmMessage: 'You will become an admin collaborator. The new owner will have full control.', - newOwner: 'New Owner', - newOwnerPlaceholder: 'Enter new owner user ID', - addedSuccess: 'Collaborator added', - updatedSuccess: 'Role updated', - removedSuccess: 'Collaborator removed', - transferredSuccess: 'Ownership transferred', - emptyState: 'No collaborators yet', -}, -``` - -- [ ] **Step 2: Add Chinese translation keys** - -Add the same structure in `zh.ts` with Chinese translations. - -Add `editor` key to the `skills` block: - -```typescript -editor: '编辑器', -``` - -`settings.tokens`: -```typescript -tokens: { - title: 'API Token', - description: '管理您的 API 访问令牌', - create: '创建 Token', - revoke: '撤销', - revokeConfirmTitle: '撤销 Token', - revokeConfirmMessage: '撤销后,使用此 Token 的 API 调用将立即失败。', - name: 'Token 名称', - namePlaceholder: '例如 CI 部署 Token', - scopes: '权限范围', - expiresAt: '过期时间', - noExpiry: '永不过期', - lastUsed: '最后使用', - neverUsed: '从未使用', - createdSuccess: 'Token 创建成功', - revokedSuccess: 'Token 已撤销', - tokenCreatedTitle: 'Token 已创建', - tokenCreatedMessage: '请立即复制此 Token,关闭后将无法再次查看。', - copyToken: '复制 Token', - copied: '已复制!', - emptyState: '尚未创建 API Token', - limitReached: '已达 Token 数量上限(50)', - justNow: '刚刚', - minutesAgo: '{{count}} 分钟前', - hoursAgo: '{{count}} 小时前', - daysAgo: '{{count}} 天前', -}, -``` - -`skillVersions`: -```typescript -skillVersions: { - title: '版本历史', - publish: '发布新版本', - publishButton: '发布', - versionNumber: '版本号', - versionPlaceholder: '例如 1.0.0', - releaseNotes: '更新说明', - releaseNotesPlaceholder: '描述变更内容...', - restore: '恢复', - delete: '删除', - restoreConfirmTitle: '恢复 Draft', - restoreConfirmMessage: '这将用版本 {{version}} 覆盖当前 Draft。是否继续?', - deleteConfirmTitle: '删除版本', - deleteConfirmMessage: '此版本将被永久删除,此操作不可撤销。', - publishedBy: '由 {{user}}', - publishedSuccess: '版本 {{version}} 已发布', - restoredSuccess: '已从版本 {{version}} 恢复 Draft', - deletedSuccess: '版本 {{version}} 已删除', - emptyState: '尚未发布任何版本', - invalidVersion: '格式须为 MAJOR.MINOR.PATCH(如 1.0.0)', -}, -``` - -`skillCollaborators`: -```typescript -skillCollaborators: { - title: '协作者', - add: '添加协作者', - userId: '用户 ID', - userIdPlaceholder: '输入用户 ID', - role: '角色', - owner: '所有者', - viewer: '查看者', - editor: '编辑者', - publisher: '发布者', - admin: '管理员', - remove: '移除', - removeConfirmTitle: '移除协作者', - removeConfirmMessage: '该用户将失去对此 Skill 的访问权限。', - transferOwnership: '转让所有权', - transferConfirmTitle: '转让所有权', - transferConfirmMessage: '您将成为管理员协作者,新所有者将拥有完全控制权。', - newOwner: '新所有者', - newOwnerPlaceholder: '输入新所有者用户 ID', - 
addedSuccess: '协作者已添加',
-  updatedSuccess: '角色已更新',
-  removedSuccess: '协作者已移除',
-  transferredSuccess: '所有权已转让',
-  emptyState: '暂无协作者',
-},
-```
-
-- [ ] **Step 3: Commit**
-
-```bash
-git add frontend/lib/i18n/locales/en.ts frontend/lib/i18n/locales/zh.ts
-git commit -m "feat(i18n): add en/zh translations for versions, collaborators, tokens"
-```
-
----
-
-## Task 9: UI — VersionHistoryTab component
-
-**Files:**
-- Create: `frontend/app/skills/components/VersionHistoryTab.tsx`
-- Create: `frontend/app/skills/schemas/versionPublishSchema.ts`
-
-- [ ] **Step 1: Create Zod schema for publish form**
-
-```typescript
-// frontend/app/skills/schemas/versionPublishSchema.ts
-import { z } from 'zod'
-
-export const versionPublishSchema = z.object({
-  version: z
-    .string()
-    .min(1, 'Version is required')
-    .regex(/^\d+\.\d+\.\d+$/, 'Must be MAJOR.MINOR.PATCH format (e.g. 1.0.0)'),
-  release_notes: z.string().optional().default(''),
-})
-
-export type VersionPublishFormData = z.infer<typeof versionPublishSchema>
-```
-
-- [ ] **Step 2: Create the VersionHistoryTab component**
-
-Create `frontend/app/skills/components/VersionHistoryTab.tsx`:
-
-```tsx
-'use client'
-
-import { ChevronDown, ChevronUp, History, Plus, RotateCcw, Trash2 } from 'lucide-react'
-import React, { useState } from 'react'
-import { useForm } from 'react-hook-form'
-import { zodResolver } from '@hookform/resolvers/zod'
-
-import {
-  AlertDialog,
-  AlertDialogAction,
-  AlertDialogCancel,
-  AlertDialogContent,
-  AlertDialogDescription,
-  AlertDialogFooter,
-  AlertDialogHeader,
-  AlertDialogTitle,
-} from '@/components/ui/alert-dialog'
-import { Button } from '@/components/ui/button'
-import { Input } from '@/components/ui/input'
-import { Label } from '@/components/ui/label'
-import { Textarea } from '@/components/ui/textarea'
-import { useToast } from '@/hooks/use-toast'
-import {
-  useSkillVersions,
-  usePublishVersion,
-  useDeleteVersion,
-  useRestoreDraft,
-} from '@/hooks/queries/skillVersions'
-import { useTranslation } from '@/lib/i18n'
-import {
-  versionPublishSchema,
-  type VersionPublishFormData,
-} from '../schemas/versionPublishSchema'
-
-interface VersionHistoryTabProps {
-  skillId: string
-  /** Current user's effective role for this skill: 'owner' | 'admin' | 'publisher' | 'editor' | 'viewer' */
-  userRole: string
-}
-
-export function VersionHistoryTab({ skillId, userRole }: VersionHistoryTabProps) {
-  const { t } = useTranslation()
-  const { toast } = useToast()
-
-  const [showPublishForm, setShowPublishForm] = useState(false)
-  const [confirmDialog, setConfirmDialog] = useState<{
-    type: 'restore' | 'delete'
-    version: string
-    open: boolean
-  }>({ type: 'restore', version: '', open: false })
-
-  const { data: versions = [], isLoading } = useSkillVersions(skillId)
-  const publishMutation = usePublishVersion(skillId)
-  const deleteMutation = useDeleteVersion(skillId)
-  const restoreMutation = useRestoreDraft(skillId)
-
-  const canPublish = ['owner', 'admin', 'publisher'].includes(userRole)
-  const canDelete = ['owner', 'admin'].includes(userRole)
-  const canRestore = ['owner', 'admin', 'publisher'].includes(userRole)
-
-  const form = useForm<VersionPublishFormData>({
-    resolver: zodResolver(versionPublishSchema),
-    defaultValues: { version: '', release_notes: '' },
-  })
-
-  const handlePublish = async (data: VersionPublishFormData) => {
-    try {
-      await publishMutation.mutateAsync(data)
-      toast({ title: t('skillVersions.publishedSuccess', { version: data.version }) })
-      form.reset()
-      setShowPublishForm(false)
-    } catch (error: any) {
-      toast({
-        title:
error?.message || t('common.error'), - variant: 'destructive', - }) - } - } - - const handleConfirmAction = async () => { - const { type, version } = confirmDialog - try { - if (type === 'restore') { - await restoreMutation.mutateAsync(version) - toast({ title: t('skillVersions.restoredSuccess', { version }) }) - } else { - await deleteMutation.mutateAsync(version) - toast({ title: t('skillVersions.deletedSuccess', { version }) }) - } - } catch (error: any) { - toast({ title: error?.message || t('common.error'), variant: 'destructive' }) - } - setConfirmDialog((prev) => ({ ...prev, open: false })) - } - - if (isLoading) { - return ( -
-
-
- ) - } - - return ( -
- {/* Publish form toggle */} - {canPublish && ( -
- - - {showPublishForm && ( -
-
- - - {form.formState.errors.version && ( -

- {t('skillVersions.invalidVersion')} -

- )} -
-
- -