diff --git a/.claude/settings.json b/.claude/settings.json index ea559d7..69cc877 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -15,7 +15,17 @@ "mcp__contextstream__search", "mcp__contextstream__session", "mcp__contextstream__skill", - "mcp__contextstream__workspace" + "mcp__contextstream__workspace", + "WebSearch", + "mcp__claude_ai_Sentry__*" ] + }, + "enabledPlugins": { + "superpowers@claude-plugins-official": true, + "claude-md-management@claude-plugins-official": true, + "github@claude-plugins-official": true, + "security-guidance@claude-plugins-official": true, + "sentry@claude-plugins-official": true, + "code-review@claude-plugins-official": true } } diff --git a/.claude/worktrees/gifted-wilbur b/.claude/worktrees/gifted-wilbur new file mode 160000 index 0000000..47e7e5a --- /dev/null +++ b/.claude/worktrees/gifted-wilbur @@ -0,0 +1 @@ +Subproject commit 47e7e5a9b92c2bf894af4cbebedcb32b94fc08f2 diff --git a/.cursor/mcp.json b/.cursor/mcp.json index 9517f0e..7472375 100644 --- a/.cursor/mcp.json +++ b/.cursor/mcp.json @@ -1,28 +1,14 @@ { "mcpServers": { - "ollama": { - "command": "node", - "args": [ - "C:\\Users\\winth\\AppData\\Roaming\\npm\\node_modules\\@muhammadmehdi\\ollama-mcp-server\\dist\\index.js" - ], - "env": { - "OLLAMA_BASE_URL": "http://localhost:11434", - "OLLAMA_NUM_CTX": "131072" - } - }, "contextstream": { - "command": "npx", "args": [ - "-y", - "envmcp", - "--env-file", - ".env", - "cmd", "/c", "contextstream-mcp" ], + "command": "cmd", "env": { "CONTEXTSTREAM_ALLOW_HEADER_AUTH": "false", + "CONTEXTSTREAM_API_KEY": "REDACTED-rotate-this-key-and-load-from-.env-do-not-commit", "CONTEXTSTREAM_API_URL": "https://api.contextstream.io", "CONTEXTSTREAM_AUTO_HIDE_INTEGRATIONS": "true", "CONTEXTSTREAM_CONSOLIDATED": "true", @@ -32,13 +18,26 @@ "CONTEXTSTREAM_LOG_LEVEL": "quiet", "CONTEXTSTREAM_OUTPUT_FORMAT": "compact", "CONTEXTSTREAM_PROGRESSIVE_MODE": "false", + "CONTEXTSTREAM_PROJECT_ID": "253a5461-8d7a-410e-a4a8-c8923e49eddc", 
"CONTEXTSTREAM_ROUTER_MODE": "false", - "CONTEXTSTREAM_SEARCH_LIMIT": "16", - "CONTEXTSTREAM_SEARCH_MAX_CHARS": "1024", + "CONTEXTSTREAM_SEARCH_LIMIT": "10", + "CONTEXTSTREAM_SEARCH_MAX_CHARS": "800", "CONTEXTSTREAM_SHOW_TIMING": "false", "CONTEXTSTREAM_TOOLSET": "standard", "CONTEXTSTREAM_TRANSCRIPTS_ENABLED": "false", - "CONTEXTSTREAM_USER_AGENT": "contextstream-mcp-rust/0.1.74" + "CONTEXTSTREAM_USER_AGENT": "contextstream-mcp-rust/0.1.77", + "CONTEXTSTREAM_WORKSPACE_ID": "f5c5b873-acfb-47ec-b93b-4acabfa78a8b" + } + }, + "ollama": { + "args": [ + "-y", + "@muhammadmehdi/ollama-mcp-server" + ], + "command": "npx", + "env": { + "OLLAMA_BASE_URL": "http://localhost:11434", + "OLLAMA_NUM_CTX": "131072" } } } diff --git a/.cursor/rules/SimHub.mdc b/.cursor/rules/SimHub.mdc index ab6add9..7ba5d93 100644 --- a/.cursor/rules/SimHub.mdc +++ b/.cursor/rules/SimHub.mdc @@ -58,11 +58,12 @@ alwaysApply: false ## Deployment & Testing -- **No deploy without 100% passing tests.** The `deploy.ps1` script enforces this: build must succeed, `dotnet test` must pass (if test projects exist), and post-deploy scripts in `tests/` must exit 0. +- **No deploy without 100% passing tests.** The `deploy.ps1` script enforces this: build must succeed, `dotnet test` must pass (if test projects exist), and post-deploy scripts in `tests/` must exit 0. **`deploy.ps1` always copies all dashboard `*.html` from `src/SimSteward.Dashboard/` to `SimHub\Web\sim-steward-dash\`** (verified after copy); “deploy” includes dashboards, not DLLs-only. - **Retry-once-then-stop rule**: when a test or build fails, the agent gets **one** additional attempt to fix and rerun. If it fails again, **hard stop** — do not keep iterating. Either halt the deploy entirely (if downstream work depends on it) or skip and move on to the next independent task. - **Linter checks** (`ReadLints`) on edited files must show 0 new errors before committing or deploying. 
- Tests include: `dotnet build` (0 errors), `dotnet test` (0 failures), `tests/*.ps1` scripts (exit 0, `PASS:` lines only), and linter checks on changed files. - See `.cursor/skills/simsteward-deploy/SKILL.md` for the full deploy workflow and test phase details. +- **`deploy.ps1` prints plugin build id** (`=== SimSteward plugin version (deployed): … ===`, then `Deploy complete. Plugin version: …`). When the user asks the agent to run deploy, **repeat that version in the reply** (same string as WebSocket `state.pluginVersion`). ## Community References diff --git a/.cursor/settings.json b/.cursor/settings.json new file mode 100644 index 0000000..4d97eb5 --- /dev/null +++ b/.cursor/settings.json @@ -0,0 +1,7 @@ +{ + "plugins": { + "grafana-assistant": { + "enabled": true + } + } +} diff --git a/.cursor/skills/contextstream/SKILL.md b/.cursor/skills/contextstream/SKILL.md index df121c1..8d4f6c4 100644 --- a/.cursor/skills/contextstream/SKILL.md +++ b/.cursor/skills/contextstream/SKILL.md @@ -15,9 +15,35 @@ Use only when the ContextStream MCP is enabled; otherwise skip and use local too - Ops cost: keyword (2), pattern (2), semantic (5), hybrid (5). - Output format: `minimal` or `paths`. +## Semantic vs code graph (pairing) +- **Concepts / “what files matter”:** `search` with `mode` `semantic`, `hybrid`, or `auto` — not `graph` for keyword or content lookup. +- **Structure / deps / usages:** `graph` actions (`related`, `dependencies`, `impact`, `usages`, `call_path`, `path`, etc.) — not a substitute for text search; use **after** `search` narrows targets when you need edges. +- **C# / SimStewardPlugin:** Smoke tests (`graph(dependencies)` on `src/SimSteward.Plugin/SimStewardPlugin.cs`) may return **0 edges** — do not assume a rich C# call graph. Prefer **`search`** plus the **Code map** table at the top of [docs/ARCHITECTURE.md](../../docs/ARCHITECTURE.md) for module ↔ file mapping. 
+- **Corpus:** Rely on `project(action="index_status")` / `ingest_local` so both search and graph see the repo. After large refactors or if structural queries look empty or wrong, run **`graph(action="ingest", wait=true)`** (or queue ingest and retry later). + +## Corpus hygiene (Cursor vs ContextStream) +- **[`.cursorignore`](../../.cursorignore)** reduces noise for **Cursor** (plans, build outputs, `.claude/projects/`, `.claude/file-history/`, etc.). It is **not guaranteed** to be applied by ContextStream server ingest; treat it as **local IDE** hygiene plus a signal for what should not dominate embeddings. +- **Refresh remote index:** `npm run contextstream:ingest:force` from repo root (runs [scripts/contextstream-ingest.ps1](../../scripts/contextstream-ingest.ps1) `-Force`), or MCP **`project(action="ingest_local", path="", force=true)`**. Poll **`project(action="index_status")`** until idle/fresh. + +## Workspace binding (mapping quality) +- Open **only** this repo root as the Cursor workspace when doing ContextStream-heavy work (avoids cross-root hits under unrelated paths). +- Keep the ContextStream **project** path aligned with that same folder. + +## When to force ingest +- After **`.cursorignore`** edits, **large moves** under `src/`, **architecture doc** reshuffles, or when **`index_status`** / search results look **stale** or polluted. Then: `contextstream:ingest:force` and optionally `graph(ingest, wait=true)`. + +## Context and search density +- **`context`:** Default before tools: `format="minified"`, `max_tokens=100`, `mode="fast"` for quick turns. For deep refactors or broad changes, raise **`max_tokens`** toward **200–400** and/or use full `context(...)` without `fast`. Use **`distill=true`** or **`mode="pack"`** when the session is long or the pack must shrink; optional **`session_tokens`** / **`context_threshold`** when the client tracks cumulative usage. 
+- **`search`:** Prefer **`output_format`** `minimal` or `paths`; default **`limit`** **3–5** unless you need exhaustive hits. Lower **`content_max_chars`** when you only need locations; add **`context_lines`** when you need local snippet context around matches. +- **Long threads:** Keep **`session(action="compress")`** after 30+ turns or milestones (see Maintenance). + +## Deploy + plugin version (agents) +- When the user asks to **run deploy** (`deploy.ps1`), run it and **quote the plugin version** from script output: the cyan line `=== SimSteward plugin version (deployed): … ===` and/or the final `Deploy complete. Plugin version: …`. Same value is WebSocket `state.pluginVersion` and Loki `deploy_marker` JSON `detail` suffix `pluginVersion=…`. +- **Memory (semantic):** Prefer `memory(action="create_node", node_type="fact", …)` for durable deploy/testing facts; tag `simsteward`, `deploy`, `testing` when relevant. + ## Operations - **Plans:** ALWAYS `session(action="capture_plan")` + `memory(action="create_task")`. NO markdown files. -- **Repo ↔ ContextStream sync:** Use MCP **`project(action="index")`** or **`project(action="ingest_local", path="…")`** — the server-side ingest/index task. Do **not** sync via custom HTTP/API scripts, committed JSON arg files, or non-MCP CLI automation (see `docs/CONTEXTSTREAM-UPLOAD-PLAN.md`). After a sync, log with **`session(action="capture", event_type="operation", …)`**. +- **Repo ↔ ContextStream sync:** Use MCP **`project(action="index")`** or **`project(action="ingest_local", path="…")`** — the server-side ingest/index task. Prefer **`npm run contextstream:ingest[:force]`** for CLI ingest with `.env` via envmcp. Do **not** sync via ad-hoc HTTP clients or committed JSON payload dumps. After a sync, log with **`session(action="capture", event_type="operation", …)`**. 
- **Optional Memory mirror:** `memory(create_doc|update_doc)` only through MCP in Cursor when you intentionally duplicate a spec in Memory — not via external API clients. - **Todos:** `memory(action="create_todo")`. - **Memory/Notes:** `session(action="capture", event_type="decision|note|lesson")`. diff --git a/.cursor/skills/create-rule/SKILL.md b/.cursor/skills/create-rule/SKILL.md new file mode 100644 index 0000000..baa87c7 --- /dev/null +++ b/.cursor/skills/create-rule/SKILL.md @@ -0,0 +1,164 @@ +--- +name: create-rule +description: >- + Create Cursor rules for persistent AI guidance. Use when you want to create a + rule, add coding standards, set up project conventions, configure + file-specific patterns, create RULE.md files, or asks about .cursor/rules/ or + AGENTS.md. +--- +# Creating Cursor Rules + +Create project rules in `.cursor/rules/` to provide persistent context for the AI agent. + +## Gather Requirements + +Before creating a rule, determine: + +1. **Purpose**: What should this rule enforce or teach? +2. **Scope**: Should it always apply, or only for specific files? +3. **File patterns**: If file-specific, which glob patterns? + +### Inferring from Context + +If you have previous conversation context, infer rules from what was discussed. You can create multiple rules if the conversation covers distinct topics or patterns. Don't ask redundant questions if the context already provides the answers. + +### Required Questions + +If the user hasn't specified scope, ask: +- "Should this rule always apply, or only when working with specific files?" + +If they mentioned specific files and haven't provided concrete patterns, ask: +- "Which file patterns should this rule apply to?" (e.g., `**/*.ts`, `backend/**/*.py`) + +It's very important that we get clarity on the file patterns. + +Use the AskQuestion tool when available to gather this efficiently. 
+ +--- + +## Rule File Format + +Rules are `.mdc` files in `.cursor/rules/` with YAML frontmatter: + +``` +.cursor/rules/ + typescript-standards.mdc + react-patterns.mdc + api-conventions.mdc +``` + +### File Structure + +```markdown +--- +description: Brief description of what this rule does +globs: **/*.ts # File pattern for file-specific rules +alwaysApply: false # Set to true if rule should always apply +--- + +# Rule Title + +Your rule content here... +``` + +### Frontmatter Fields + +| Field | Type | Description | +|-------|------|-------------| +| `description` | string | What the rule does (shown in rule picker) | +| `globs` | string | File pattern - rule applies when matching files are open | +| `alwaysApply` | boolean | If true, applies to every session | + +--- + +## Rule Configurations + +### Always Apply + +For universal standards that should apply to every conversation: + +```yaml +--- +description: Core coding standards for the project +alwaysApply: true +--- +``` + +### Apply to Specific Files + +For rules that apply when working with certain file types: + +```yaml +--- +description: TypeScript conventions for this project +globs: **/*.ts +alwaysApply: false +--- +``` + +--- + +## Best Practices + +### Keep Rules Concise + +- **Under 50 lines**: Rules should be concise and to the point +- **One concern per rule**: Split large rules into focused pieces +- **Actionable**: Write like clear internal docs +- **Concrete examples**: Ideally provide concrete examples of how to fix issues + +--- + +## Example Rules + +### TypeScript Standards + +```markdown +--- +description: TypeScript coding standards +globs: **/*.ts +alwaysApply: false +--- + +# Error Handling + +\`\`\`typescript +// ❌ BAD +try { + await fetchData(); +} catch (e) {} + +// ✅ GOOD +try { + await fetchData(); +} catch (e) { + logger.error('Failed to fetch', { error: e }); + throw new DataFetchError('Unable to retrieve data', { cause: e }); +} +\`\`\` +``` + +### React Patterns + +```markdown 
+--- +description: React component patterns +globs: **/*.tsx +alwaysApply: false +--- + +# React Patterns + +- Use functional components +- Extract custom hooks for reusable logic +- Colocate styles with components +``` + +--- + +## Checklist + +- [ ] File is `.mdc` format in `.cursor/rules/` +- [ ] Frontmatter configured correctly +- [ ] Content under 500 lines +- [ ] Includes concrete examples diff --git a/.cursor/skills/create-skill/SKILL.md b/.cursor/skills/create-skill/SKILL.md new file mode 100644 index 0000000..25d82cd --- /dev/null +++ b/.cursor/skills/create-skill/SKILL.md @@ -0,0 +1,498 @@ +--- +name: create-skill +description: >- + Guides users through creating effective Agent Skills for Cursor. Use when you + want to create, write, or author a new skill, or asks about skill structure, + best practices, or SKILL.md format. +--- +# Creating Skills in Cursor + +This skill guides you through creating effective Agent Skills for Cursor. Skills are markdown files that teach the agent how to perform specific tasks: reviewing PRs using team standards, generating commit messages in a preferred format, querying database schemas, or any specialized workflow. + +## Before You Begin: Gather Requirements + +Before creating a skill, gather essential information from the user about: + +1. **Purpose and scope**: What specific task or workflow should this skill help with? +2. **Target location**: Should this be a personal skill (~/.cursor/skills/) or project skill (.cursor/skills/)? +3. **Trigger scenarios**: When should the agent automatically apply this skill? +4. **Key domain knowledge**: What specialized information does the agent need that it wouldn't already know? +5. **Output format preferences**: Are there specific templates, formats, or styles required? +6. **Existing patterns**: Are there existing examples or conventions to follow? + +### Inferring from Context + +If you have previous conversation context, infer the skill from what was discussed. 
You can create skills based on workflows, patterns, or domain knowledge that emerged in the conversation. + +### Gathering Additional Information + +If you need clarification, use the AskQuestion tool when available: + +``` +Example AskQuestion usage: +- "Where should this skill be stored?" with options like ["Personal (~/.cursor/skills/)", "Project (.cursor/skills/)"] +- "Should this skill include executable scripts?" with options like ["Yes", "No"] +``` + +If the AskQuestion tool is not available, ask these questions conversationally. + +--- + +## Skill File Structure + +### Directory Layout + +Skills are stored as directories containing a `SKILL.md` file: + +``` +skill-name/ +├── SKILL.md # Required - main instructions +├── reference.md # Optional - detailed documentation +├── examples.md # Optional - usage examples +└── scripts/ # Optional - utility scripts + ├── validate.py + └── helper.sh +``` + +### Storage Locations + +| Type | Path | Scope | +|------|------|-------| +| Personal | ~/.cursor/skills/skill-name/ | Available across all your projects | +| Project | .cursor/skills/skill-name/ | Shared with anyone using the repository | + +**IMPORTANT**: Never create skills in `~/.cursor/skills-cursor/`. This directory is reserved for Cursor's internal built-in skills and is managed automatically by the system. + +### SKILL.md Structure + +Every skill requires a `SKILL.md` file with YAML frontmatter and markdown body: + +```markdown +--- +name: your-skill-name +description: Brief description of what this skill does and when to use it +--- + +# Your Skill Name + +## Instructions +Clear, step-by-step guidance for the agent. + +## Examples +Concrete examples of using this skill. 
+``` + +### Required Metadata Fields + +| Field | Requirements | Purpose | +|-------|--------------|---------| +| `name` | Max 64 chars, lowercase letters/numbers/hyphens only | Unique identifier for the skill | +| `description` | Max 1024 chars, non-empty | Helps agent decide when to apply the skill | + +--- + +## Writing Effective Descriptions + +The description is **critical** for skill discovery. The agent uses it to decide when to apply your skill. + +### Description Best Practices + +1. **Write in third person** (the description is injected into the system prompt): + - ✅ Good: "Processes Excel files and generates reports" + - ❌ Avoid: "I can help you process Excel files" + - ❌ Avoid: "You can use this to process Excel files" + +2. **Be specific and include trigger terms**: + - ✅ Good: "Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction." + - ❌ Vague: "Helps with documents" + +3. **Include both WHAT and WHEN**: + - WHAT: What the skill does (specific capabilities) + - WHEN: When the agent should use it (trigger scenarios) + +### Description Examples + +```yaml +# PDF Processing +description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction. + +# Excel Analysis +description: Analyze Excel spreadsheets, create pivot tables, generate charts. Use when analyzing Excel files, spreadsheets, tabular data, or .xlsx files. + +# Git Commit Helper +description: Generate descriptive commit messages by analyzing git diffs. Use when the user asks for help writing commit messages or reviewing staged changes. + +# Code Review +description: Review code for quality, security, and best practices following team standards. Use when reviewing pull requests, code changes, or when the user asks for a code review. 
+``` + +--- + +## Core Authoring Principles + +### 1. Concise is Key + +The context window is shared with conversation history, other skills, and requests. Every token competes for space. + +**Default assumption**: The agent is already very smart. Only add context it doesn't already have. + +Challenge each piece of information: +- "Does the agent really need this explanation?" +- "Can I assume the agent knows this?" +- "Does this paragraph justify its token cost?" + +**Good (concise)**: +```markdown +## Extract PDF text + +Use pdfplumber for text extraction: + +\`\`\`python +import pdfplumber + +with pdfplumber.open("file.pdf") as pdf: + text = pdf.pages[0].extract_text() +\`\`\` +``` + +**Bad (verbose)**: +```markdown +## Extract PDF text + +PDF (Portable Document Format) files are a common file format that contains +text, images, and other content. To extract text from a PDF, you'll need to +use a library. There are many libraries available for PDF processing, but we +recommend pdfplumber because it's easy to use and handles most cases well... +``` + +### 2. Keep SKILL.md Under 500 Lines + +For optimal performance, the main SKILL.md file should be concise. Use progressive disclosure for detailed content. + +### 3. Progressive Disclosure + +Put essential information in SKILL.md; detailed reference material in separate files that the agent reads only when needed. + +```markdown +# PDF Processing + +## Quick start +[Essential instructions here] + +## Additional resources +- For complete API details, see [reference.md](reference.md) +- For usage examples, see [examples.md](examples.md) +``` + +**Keep references one level deep** - link directly from SKILL.md to reference files. Deeply nested references may result in partial reads. + +### 4. 
Set Appropriate Degrees of Freedom + +Match specificity to the task's fragility: + +| Freedom Level | When to Use | Example | +|---------------|-------------|---------| +| **High** (text instructions) | Multiple valid approaches, context-dependent | Code review guidelines | +| **Medium** (pseudocode/templates) | Preferred pattern with acceptable variation | Report generation | +| **Low** (specific scripts) | Fragile operations, consistency critical | Database migrations | + +--- + +## Common Patterns + +### Template Pattern + +Provide output format templates: + +```markdown +## Report structure + +Use this template: + +\`\`\`markdown +# [Analysis Title] + +## Executive summary +[One-paragraph overview of key findings] + +## Key findings +- Finding 1 with supporting data +- Finding 2 with supporting data + +## Recommendations +1. Specific actionable recommendation +2. Specific actionable recommendation +\`\`\` +``` + +### Examples Pattern + +For skills where output quality depends on seeing examples: + +```markdown +## Commit message format + +**Example 1:** +Input: Added user authentication with JWT tokens +Output: +\`\`\` +feat(auth): implement JWT-based authentication + +Add login endpoint and token validation middleware +\`\`\` + +**Example 2:** +Input: Fixed bug where dates displayed incorrectly +Output: +\`\`\` +fix(reports): correct date formatting in timezone conversion + +Use UTC timestamps consistently across report generation +\`\`\` +``` + +### Workflow Pattern + +Break complex operations into clear steps with checklists: + +```markdown +## Form filling workflow + +Copy this checklist and track progress: + +\`\`\` +Task Progress: +- [ ] Step 1: Analyze the form +- [ ] Step 2: Create field mapping +- [ ] Step 3: Validate mapping +- [ ] Step 4: Fill the form +- [ ] Step 5: Verify output +\`\`\` + +**Step 1: Analyze the form** +Run: \`python scripts/analyze_form.py input.pdf\` +... 
+``` + +### Conditional Workflow Pattern + +Guide through decision points: + +```markdown +## Document modification workflow + +1. Determine the modification type: + + **Creating new content?** → Follow "Creation workflow" below + **Editing existing content?** → Follow "Editing workflow" below + +2. Creation workflow: + - Use docx-js library + - Build document from scratch + ... +``` + +### Feedback Loop Pattern + +For quality-critical tasks, implement validation loops: + +```markdown +## Document editing process + +1. Make your edits +2. **Validate immediately**: \`python scripts/validate.py output/\` +3. If validation fails: + - Review the error message + - Fix the issues + - Run validation again +4. **Only proceed when validation passes** +``` + +--- + +## Utility Scripts + +Pre-made scripts offer advantages over generated code: +- More reliable than generated code +- Save tokens (no code in context) +- Save time (no code generation) +- Ensure consistency across uses + +```markdown +## Utility scripts + +**analyze_form.py**: Extract all form fields from PDF +\`\`\`bash +python scripts/analyze_form.py input.pdf > fields.json +\`\`\` + +**validate.py**: Check for errors +\`\`\`bash +python scripts/validate.py fields.json +# Returns: "OK" or lists conflicts +\`\`\` +``` + +Make clear whether the agent should **execute** the script (most common) or **read** it as reference. + +--- + +## Anti-Patterns to Avoid + +### 1. Windows-Style Paths +- ✅ Use: `scripts/helper.py` +- ❌ Avoid: `scripts\helper.py` + +### 2. Too Many Options +```markdown +# Bad - confusing +"You can use pypdf, or pdfplumber, or PyMuPDF, or..." + +# Good - provide a default with escape hatch +"Use pdfplumber for text extraction. +For scanned PDFs requiring OCR, use pdf2image with pytesseract instead." +``` + +### 3. Time-Sensitive Information +```markdown +# Bad - will become outdated +"If you're doing this before August 2025, use the old API." 
+ +# Good - use an "old patterns" section +## Current method +Use the v2 API endpoint. + +## Old patterns (deprecated) +
+Legacy v1 API +... +
+``` + +### 4. Inconsistent Terminology +Choose one term and use it throughout: +- ✅ Always "API endpoint" (not mixing "URL", "route", "path") +- ✅ Always "field" (not mixing "box", "element", "control") + +### 5. Vague Skill Names +- ✅ Good: `processing-pdfs`, `analyzing-spreadsheets` +- ❌ Avoid: `helper`, `utils`, `tools` + +--- + +## Skill Creation Workflow + +When helping a user create a skill, follow this process: + +### Phase 1: Discovery + +Gather information about: +1. The skill's purpose and primary use case +2. Storage location (personal vs project) +3. Trigger scenarios +4. Any specific requirements or constraints +5. Existing examples or patterns to follow + +If you have access to the AskQuestion tool, use it for efficient structured gathering. Otherwise, ask conversationally. + +### Phase 2: Design + +1. Draft the skill name (lowercase, hyphens, max 64 chars) +2. Write a specific, third-person description +3. Outline the main sections needed +4. Identify if supporting files or scripts are needed + +### Phase 3: Implementation + +1. Create the directory structure +2. Write the SKILL.md file with frontmatter +3. Create any supporting reference files +4. Create any utility scripts if needed + +### Phase 4: Verification + +1. Verify the SKILL.md is under 500 lines +2. Check that the description is specific and includes trigger terms +3. Ensure consistent terminology throughout +4. Verify all file references are one level deep +5. Test that the skill can be discovered and applied + +--- + +## Complete Example + +Here's a complete example of a well-structured skill: + +**Directory structure:** +``` +code-review/ +├── SKILL.md +├── STANDARDS.md +└── examples.md +``` + +**SKILL.md:** +```markdown +--- +name: code-review +description: Review code for quality, security, and maintainability following team standards. Use when reviewing pull requests, examining code changes, or when the user asks for a code review. 
+--- + +# Code Review + +## Quick Start + +When reviewing code: + +1. Check for correctness and potential bugs +2. Verify security best practices +3. Assess code readability and maintainability +4. Ensure tests are adequate + +## Review Checklist + +- [ ] Logic is correct and handles edge cases +- [ ] No security vulnerabilities (SQL injection, XSS, etc.) +- [ ] Code follows project style conventions +- [ ] Functions are appropriately sized and focused +- [ ] Error handling is comprehensive +- [ ] Tests cover the changes + +## Providing Feedback + +Format feedback as: +- 🔴 **Critical**: Must fix before merge +- 🟡 **Suggestion**: Consider improving +- 🟢 **Nice to have**: Optional enhancement + +## Additional Resources + +- For detailed coding standards, see [STANDARDS.md](STANDARDS.md) +- For example reviews, see [examples.md](examples.md) +``` + +--- + +## Summary Checklist + +Before finalizing a skill, verify: + +### Core Quality +- [ ] Description is specific and includes key terms +- [ ] Description includes both WHAT and WHEN +- [ ] Written in third person +- [ ] SKILL.md body is under 500 lines +- [ ] Consistent terminology throughout +- [ ] Examples are concrete, not abstract + +### Structure +- [ ] File references are one level deep +- [ ] Progressive disclosure used appropriately +- [ ] Workflows have clear steps +- [ ] No time-sensitive information + +### If Including Scripts +- [ ] Scripts solve problems rather than punt +- [ ] Required packages are documented +- [ ] Error handling is explicit and helpful +- [ ] No Windows-style paths diff --git a/.cursor/skills/create-subagent/SKILL.md b/.cursor/skills/create-subagent/SKILL.md new file mode 100644 index 0000000..05cfc50 --- /dev/null +++ b/.cursor/skills/create-subagent/SKILL.md @@ -0,0 +1,225 @@ +--- +name: create-subagent +description: >- + Create custom subagents for specialized AI tasks. 
Use when you want to create + a new type of subagent, set up task-specific agents, configure code reviewers, + debuggers, or domain-specific assistants with custom prompts. +disable-model-invocation: true +--- +# Creating Custom Subagents + +This skill guides you through creating custom subagents for Cursor. Subagents are specialized AI assistants that run in isolated contexts with custom system prompts. + +## When to Use Subagents + +Subagents help you: +- **Preserve context** by isolating exploration from your main conversation +- **Specialize behavior** with focused system prompts for specific domains +- **Reuse configurations** across projects with user-level subagents + +### Inferring from Context + +If you have previous conversation context, infer the subagent's purpose and behavior from what was discussed. Create the subagent based on specialized tasks or workflows that emerged in the conversation. + +## Subagent Locations + +| Location | Scope | Priority | +|----------|-------|----------| +| `.cursor/agents/` | Current project | Higher | +| `~/.cursor/agents/` | All your projects | Lower | + +When multiple subagents share the same name, the higher-priority location wins. + +**Project subagents** (`.cursor/agents/`): Ideal for codebase-specific agents. Check into version control to share with your team. + +**User subagents** (`~/.cursor/agents/`): Personal agents available across all your projects. + +## Subagent File Format + +Create a `.md` file with YAML frontmatter and a markdown body (the system prompt): + +```markdown +--- +name: code-reviewer +description: Reviews code for quality and best practices +--- + +You are a code reviewer. When invoked, analyze the code and provide +specific, actionable feedback on quality, security, and best practices. 
+``` + +### Required Fields + +| Field | Description | +|-------|-------------| +| `name` | Unique identifier (lowercase letters and hyphens only) | +| `description` | When to delegate to this subagent (be specific!) | + +## Writing Effective Descriptions + +The description is **critical** - the AI uses it to decide when to delegate. + +```yaml +# ❌ Too vague +description: Helps with code + +# ✅ Specific and actionable +description: Expert code review specialist. Proactively reviews code for quality, security, and maintainability. Use immediately after writing or modifying code. +``` + +Include "use proactively" to encourage automatic delegation. + +## Example Subagents + +### Code Reviewer + +```markdown +--- +name: code-reviewer +description: Expert code review specialist. Proactively reviews code for quality, security, and maintainability. Use immediately after writing or modifying code. +--- + +You are a senior code reviewer ensuring high standards of code quality and security. + +When invoked: +1. Run git diff to see recent changes +2. Focus on modified files +3. Begin review immediately + +Review checklist: +- Code is clear and readable +- Functions and variables are well-named +- No duplicated code +- Proper error handling +- No exposed secrets or API keys +- Input validation implemented +- Good test coverage +- Performance considerations addressed + +Provide feedback organized by priority: +- Critical issues (must fix) +- Warnings (should fix) +- Suggestions (consider improving) + +Include specific examples of how to fix issues. +``` + +### Debugger + +```markdown +--- +name: debugger +description: Debugging specialist for errors, test failures, and unexpected behavior. Use proactively when encountering any issues. +--- + +You are an expert debugger specializing in root cause analysis. + +When invoked: +1. Capture error message and stack trace +2. Identify reproduction steps +3. Isolate the failure location +4. Implement minimal fix +5. 
Verify solution works + +Debugging process: +- Analyze error messages and logs +- Check recent code changes +- Form and test hypotheses +- Add strategic debug logging +- Inspect variable states + +For each issue, provide: +- Root cause explanation +- Evidence supporting the diagnosis +- Specific code fix +- Testing approach +- Prevention recommendations + +Focus on fixing the underlying issue, not the symptoms. +``` + +### Data Scientist + +```markdown +--- +name: data-scientist +description: Data analysis expert for SQL queries, BigQuery operations, and data insights. Use proactively for data analysis tasks and queries. +--- + +You are a data scientist specializing in SQL and BigQuery analysis. + +When invoked: +1. Understand the data analysis requirement +2. Write efficient SQL queries +3. Use BigQuery command line tools (bq) when appropriate +4. Analyze and summarize results +5. Present findings clearly + +Key practices: +- Write optimized SQL queries with proper filters +- Use appropriate aggregations and joins +- Include comments explaining complex logic +- Format results for readability +- Provide data-driven recommendations + +For each analysis: +- Explain the query approach +- Document any assumptions +- Highlight key findings +- Suggest next steps based on data + +Always ensure queries are efficient and cost-effective. +``` + +## Subagent Creation Workflow + +### Step 1: Decide the Scope + +- **Project-level** (`.cursor/agents/`): For codebase-specific agents shared with team +- **User-level** (`~/.cursor/agents/`): For personal agents across all projects + +### Step 2: Create the File + +```bash +# For project-level +mkdir -p .cursor/agents +touch .cursor/agents/my-agent.md + +# For user-level +mkdir -p ~/.cursor/agents +touch ~/.cursor/agents/my-agent.md +``` + +### Step 3: Define Configuration + +Write the frontmatter with the required fields (`name` and `description`). + +### Step 4: Write the System Prompt + +The body becomes the system prompt. 
Be specific about: +- What the agent should do when invoked +- The workflow or process to follow +- Output format and structure +- Any constraints or guidelines + +### Step 5: Test the Agent + +Ask the AI to use your new agent: + +``` +Use the my-agent subagent to [task description] +``` + +## Best Practices + +1. **Design focused subagents**: Each should excel at one specific task +2. **Write detailed descriptions**: Include trigger terms so the AI knows when to delegate +3. **Check into version control**: Share project subagents with your team +4. **Use proactive language**: Include "use proactively" in descriptions + +## Troubleshooting + +### Subagent Not Found +- Ensure file is in `.cursor/agents/` or `~/.cursor/agents/` +- Check file has `.md` extension +- Verify YAML frontmatter syntax is valid diff --git a/.cursor/skills/grafana-loki-observability/SKILL.md b/.cursor/skills/grafana-loki-observability/SKILL.md index aaafd2a..0baa3ec 100644 --- a/.cursor/skills/grafana-loki-observability/SKILL.md +++ b/.cursor/skills/grafana-loki-observability/SKILL.md @@ -23,5 +23,5 @@ description: Grafana/Loki observability stack expert. - **AI Filter:** ALWAYS append `| level != "DEBUG"`. ## Stack -- **Grafana:** No repo-provisioned dashboard JSON; use **Explore** + LogQL in **docs/GRAFANA-LOGGING.md**. Local wipe: **docs/observability-local.md** § Housekeeping. +- **Grafana:** **Explore** + LogQL per **docs/GRAFANA-LOGGING.md**; local compose also provisions **Sim Steward — Deploy health** ([observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json](../../observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json)) for `deploy.ps1` / plugin bring-up correlation. Local wipe: **docs/observability-local.md** § Housekeeping. - **Local:** `observability/local/`. Loki on 3100. Use `SIMSTEWARD_LOKI_URL=http://localhost:3100`, `SIMSTEWARD_LOG_ENV=local`. 
\ No newline at end of file diff --git a/.cursor/skills/migrate-to-skills/SKILL.md b/.cursor/skills/migrate-to-skills/SKILL.md new file mode 100644 index 0000000..1cb37f8 --- /dev/null +++ b/.cursor/skills/migrate-to-skills/SKILL.md @@ -0,0 +1,134 @@ +--- +name: migrate-to-skills +description: >- + Convert 'Applied intelligently' Cursor rules (.cursor/rules/*.mdc) and slash + commands (.cursor/commands/*.md) to Agent Skills format (.cursor/skills/). Use + when you want to migrate rules or commands to skills, convert .mdc rules to + SKILL.md format, or consolidate commands into the skills directory. +disable-model-invocation: true +--- +# Migrate Rules and Slash Commands to Skills + +Convert Cursor rules ("Applied intelligently") and slash commands to Agent Skills format. + +**CRITICAL: Preserve the exact body content. Do not modify, reformat, or "improve" it - copy verbatim.** + +## Locations + +| Level | Source | Destination | +|-------|--------|-------------| +| Project | `{workspaceFolder}/**/.cursor/rules/*.mdc`, `{workspaceFolder}/.cursor/commands/*.md` | +| User | `~/.cursor/commands/*.md` | + +Notes: +- Cursor rules inside the project can live in nested directories. Be thorough in your search and use glob patterns to find them. +- Ignore anything in ~/.cursor/worktrees +- Ignore anything in ~/.cursor/skills-cursor. This is reserved for Cursor's internal built-in skills and is managed automatically by the system. + +## Finding Files to Migrate + +**Rules**: Migrate if rule has a `description` but NO `globs` and NO `alwaysApply: true`. + +**Commands**: Migrate all - they're plain markdown without frontmatter. + +## Conversion Format + +### Rules: .mdc → SKILL.md + +```markdown +# Before: .cursor/rules/my-rule.mdc +--- +description: What this rule does +globs: +alwaysApply: false +--- +# Title +Body content... +``` + +```markdown +# After: .cursor/skills/my-rule/SKILL.md +--- +name: my-rule +description: What this rule does +--- +# Title +Body content... 
+``` + +Changes: Add `name` field, remove `globs`/`alwaysApply`, keep body exactly. + +### Commands: .md → SKILL.md + +```markdown +# Before: .cursor/commands/commit.md +# Commit current work +Instructions here... +``` + +```markdown +# After: .cursor/skills/commit/SKILL.md +--- +name: commit +description: Commit current work with standardized message format +disable-model-invocation: true +--- +# Commit current work +Instructions here... +``` + +Changes: Add frontmatter with `name` (from filename), `description` (infer from content), and `disable-model-invocation: true`, keep body exactly. + +**Note:** The `disable-model-invocation: true` field prevents the model from automatically invoking this skill. Slash commands are designed to be explicitly triggered by the user via the `/` menu, not automatically suggested by the model. + +## Notes + +- `name` must be lowercase with hyphens only +- `description` is critical for skill discovery +- Optionally delete originals after verifying migration works + +### Migrate a Rule (.mdc → SKILL.md) + +1. Read the rule file +2. Extract the `description` from the frontmatter +3. Extract the body content (everything after the closing `---` of the frontmatter) +4. Create the skill directory: `.cursor/skills/{skill-name}/` (skill name = filename without .mdc) +5. Write `SKILL.md` with new frontmatter (`name` and `description`) + the EXACT original body content (preserve all whitespace, formatting, code blocks verbatim) +6. Delete the original rule file + +### Migrate a Command (.md → SKILL.md) + +1. Read the command file +2. Extract description from the first heading (remove `#` prefix) +3. Create the skill directory: `.cursor/skills/{skill-name}/` (skill name = filename without .md) +4. Write `SKILL.md` with new frontmatter (`name`, `description`, and `disable-model-invocation: true`) + blank line + the EXACT original file content (preserve all whitespace, formatting, code blocks verbatim) +5. 
Delete the original command file + +**CRITICAL: Copy the body content character-for-character. Do not reformat, fix typos, or "improve" anything.** + +## Workflow + +If you have the Task tool available: +DO NOT start to read all of the files yourself. That function should be delegated to the subagents. Your job is to dispatch the subagents for each category of files and wait for the results. + +1. [ ] Create the skills directories if they don't exist (`.cursor/skills/` for project, `~/.cursor/skills/` for user) +2. Dispatch three fast general purpose subagents (NOT explore) in parallel to do the following steps for project rules (pattern: `{workspaceFolder}/**/.cursor/rules/*.mdc`), user commands (pattern: `~/.cursor/commands/*.md`), and project commands (pattern: `{workspaceFolder}/**/.cursor/commands/*.md`): + I. [ ] Find files to migrate in the given pattern + II. [ ] For rules, check if it's an "applied intelligently" rule (has `description`, no `globs`, no `alwaysApply: true`). Commands are always migrated. DO NOT use the terminal to read files. Use the read tool. + III. [ ] Make a list of files to migrate. If empty, done. + IV. [ ] For each file, read it, then write the new skill file preserving the body content EXACTLY. DO NOT use the terminal to write these files. Use the edit tool. + V. [ ] Delete the original file. DO NOT use the terminal to delete these files. Use the delete tool. + VI. [ ] Return a list of all the skill files that were migrated along with the original file paths. +3. [ ] Wait for all subagents to complete and summarize the results to the user. IMPORTANT: Make sure to let them know if they want to undo the migration, to ask you to. +4. [ ] If the user asks you to undo the migration, do the opposite of the above steps to restore the original files. + + +If you don't have the Task tool available: +1. [ ] Create the skills directories if they don't exist (`.cursor/skills/` for project, `~/.cursor/skills/` for user) +2. 
[ ] Find files to migrate in both project (`.cursor/`) and user (`~/.cursor/`) directories +3. [ ] For rules, check if it's an "applied intelligently" rule (has `description`, no `globs`, no `alwaysApply: true`). Commands are always migrated. DO NOT use the terminal to read files. Use the read tool. +4. [ ] Make a list of files to migrate. If empty, done. +5. [ ] For each file, read it, then write the new skill file preserving the body content EXACTLY. DO NOT use the terminal to write these files. Use the edit tool. +6. [ ] Delete the original file. DO NOT use the terminal to delete these files. Use the delete tool. +7. [ ] Summarize the results to the user. IMPORTANT: Make sure to let them know if they want to undo the migration, to ask you to. +8. [ ] If the user asks you to undo the migration, do the opposite of the above steps to restore the original files. diff --git a/.cursor/skills/sdk-capture-verify/SKILL.md b/.cursor/skills/sdk-capture-verify/SKILL.md new file mode 100644 index 0000000..5dc2f68 --- /dev/null +++ b/.cursor/skills/sdk-capture-verify/SKILL.md @@ -0,0 +1,103 @@ +--- +name: sdk-capture-verify +description: Verify SDK Data Capture Suite test results against local disk logs and Grafana Loki. Identifies three gap categories — on-disk-not-Loki, Loki-not-disk, both-exist-but-inconsistent. +--- + +# SDK Capture Verify + +Manually verifies that a data capture test run made it correctly to all sinks. + +## What to do + +### 1. Read credentials from `.env` + +Read `c:\Users\winth\dev\sim-steward\simhub-plugin\.env` and extract: +- `SIMSTEWARD_LOKI_URL` — e.g. `https://logs-prod-us-east-0.grafana.net` +- `SIMSTEWARD_LOKI_USER` — numeric user ID +- `CURSOR_ELEVATED_GRAFANA_TOKEN` — elevated read key (use as HTTP Basic password) +- `GRAFANA_API_TOKEN` — fallback + +### 2. Find `test_run_id` + +If the user provided a `test_run_id`, use it. 
+Otherwise read the last 500 lines of `%LocalAppData%\SimSteward\plugin-structured.jsonl` (expand `%LocalAppData%` to the Windows path `C:\Users\winth\AppData\Local`) and find the most recent line containing `"event":"sdk_capture_suite_started"`. Extract its `test_run_id` field.
+
+### 3. Read local disk logs
+
+Read **all** lines from `plugin-structured.jsonl` where `test_run_id` matches.
+Also scan `C:\Users\winth\AppData\Local\SimSteward\replay-incident-index\record-samples\` for `.ndjson` files that contain the matching `test_run_id` (these are 60 Hz raw samples — note their presence but do NOT expect them in Loki).
+
+Group disk events by `event` field and note:
+- Total matching line count
+- Unique `event` values and their counts
+
+### 4. Query local Loki (if reachable)
+
+```
+GET http://localhost:3100/loki/api/v1/query_range
+  ?query={app="sim-steward"}|json|test_run_id="<TEST_RUN_ID>"
+  &start=<START_NS>&end=<END_NS>
+  &limit=5000
+```
+
+Use `WebFetch` for all HTTP calls. Group results by `event` and `test_tag`.
+
+### 5. Query Grafana Cloud Loki
+
+```
+GET https://logs-prod-us-east-0.grafana.net/loki/api/v1/query_range
+  ?query={app="sim-steward"}|json|test_run_id="<TEST_RUN_ID>"
+  &start=<START_NS>&end=<END_NS>
+  &limit=5000
+Authorization: Basic base64(SIMSTEWARD_LOKI_USER:CURSOR_ELEVATED_GRAFANA_TOKEN)
+```
+
+Use `WebFetch`. Group results the same way.
+
+### 6. Gap analysis — three categories
+
+**Gap 1 — On disk, missing from both Loki instances:**
+List each `event`+`test_tag` that appears in the JSONL file but in neither local Loki nor Cloud Loki.
+Include disk timestamp and key fields: `car_idx`, `replay_frame`, `test_tag`.
+Likely causes: Alloy file-tail lag, network drop, not yet ingested.
+
+**Gap 2 — In Loki, missing from disk:**
+List events found in Loki but absent from the JSONL file.
+Likely cause: in-memory push succeeded but disk flush failed.
+ +**Gap 3 — Both exist but field values disagree:** +For each event that exists in both disk and Loki, compare: +- `car_idx`, `replay_frame`, `replay_session_time`, `cam_car_idx`, `test_run_id`, `detection_source` + +Flag any field where the disk value ≠ Loki value. +**Note:** Frequency/rate discrepancies (e.g. different counts of the same event) are informational only — focus on value accuracy for matched events. + +### 7. Output a structured report + +``` +## SDK Capture Verify Report +Test Run ID: +Disk events: N | Local Loki: N | Cloud Loki: N + +### Gap 1: On disk, missing from Loki + + +### Gap 2: In Loki, missing from disk + + +### Gap 3: Inconsistencies + + +### Summary +- Total gaps: N +- Accuracy score: N/N events fully consistent +- Grafana Explore link: https://...grafana.net/explore?... +``` + +## Notes + +- Use `WebFetch` for all Loki HTTP calls (GET with Authorization header where needed). +- If Loki returns 401, remind the user to check `CURSOR_ELEVATED_GRAFANA_TOKEN` in `.env`. +- If no `test_run_id` found on disk, prompt the user to run the suite first via `data-capture-suite.html`. +- The 60 Hz record samples (`.ndjson` files in `record-samples/`) are cross-referenced for completeness but are NOT expected in Loki — their absence from Loki is normal behaviour, not a gap. +- Unix nanosecond timestamps: `Date.now()` in JS gives ms; multiply by 1,000,000 for ns. For Claude: use current UTC time minus 1 hour as start, current UTC as end. diff --git a/.cursor/skills/shell/SKILL.md b/.cursor/skills/shell/SKILL.md new file mode 100644 index 0000000..bcf9bcd --- /dev/null +++ b/.cursor/skills/shell/SKILL.md @@ -0,0 +1,24 @@ +--- +name: shell +description: >- + Runs the rest of a /shell request as a literal shell command. Use only when + the user explicitly invokes /shell and wants the following text executed + directly in the terminal. 
+disable-model-invocation: true +--- +# Run Shell Commands + +Use this skill only when the user explicitly invokes `/shell`. + +## Behavior + +1. Treat all user text after the `/shell` invocation as the literal shell command to run. +2. Execute that command immediately with the terminal tool. +3. Do not rewrite, explain, or "improve" the command before running it. +4. Do not inspect the repository first unless the command itself requires repository context. +5. If the user invokes `/shell` without any following text, ask them which command to run. + +## Response + +- Run the command first. +- Then briefly report the exit status and any important stdout or stderr. diff --git a/.cursor/skills/simsteward-deploy/SKILL.md b/.cursor/skills/simsteward-deploy/SKILL.md index d0cd095..b184ac4 100644 --- a/.cursor/skills/simsteward-deploy/SKILL.md +++ b/.cursor/skills/simsteward-deploy/SKILL.md @@ -5,12 +5,13 @@ description: Deploy/Watch commands for SimHub plugins. # SimSteward Deploy Workflow ## Quick start -- **Manual deploy:** `pwsh -File .\deploy.ps1` -- **Watch deploy:** `pwsh -File .\scripts\watch-deploy.ps1` +- **Manual deploy:** `pwsh -File .\deploy.ps1` — optional **`-EnvFile`** `C:\path\to\secrets.env` or repo-relative (`.env.prod`); merges `observability/local/.env.observability.local` after. +- **Watch deploy:** `pwsh -File .\scripts\watch-deploy.ps1` — same **`-EnvFile`** passthrough to deploy. +- **Agent reply:** After deploy, tell the user the **plugin version** printed by the script (`=== SimSteward plugin version (deployed): … ===` and `Deploy complete. Plugin version: …`). Matches SimHub dashboard / WebSocket `pluginVersion`. 
## Locations - **Plugin:** `C:\Program Files (x86)\SimHub\` (or `$env:SIMHUB_PATH`) -- **Dashboard:** `SimHub\Web\sim-steward-dash\` (served at `http://:8888/Web/sim-steward-dash/index.html`) +- **Dashboards (always part of deploy):** Every `*.html` in `src/SimSteward.Dashboard/` is copied to `SimHub\Web\sim-steward-dash\` plus `README.txt`. Verification fails if any source `.html` is missing or empty on disk after copy. Served when SimHub HTTP is up: `http://:8888/Web/sim-steward-dash/.html` (main: `index.html`). ## Testing Gate - Deploy MUST pass 100%. Pipeline enforces: diff --git a/.cursor/skills/update-cursor-settings/SKILL.md b/.cursor/skills/update-cursor-settings/SKILL.md new file mode 100644 index 0000000..7fac454 --- /dev/null +++ b/.cursor/skills/update-cursor-settings/SKILL.md @@ -0,0 +1,119 @@ +--- +name: update-cursor-settings +description: >- + Modify Cursor/VSCode user settings in settings.json. Use when you want to + change editor settings, preferences, configuration, themes, font size, tab + size, format on save, auto save, keybindings, or any settings.json values. +--- +# Updating Cursor Settings + +This skill guides you through modifying Cursor/VSCode user settings. Use this when you want to change editor settings, preferences, configuration, themes, keybindings, or any `settings.json` values. + +## Settings File Location + +| OS | Path | +|----|------| +| macOS | ~/Library/Application Support/Cursor/User/settings.json | +| Linux | ~/.config/Cursor/User/settings.json | +| Windows | %APPDATA%\Cursor\User\settings.json | + +## Before Modifying Settings + +1. **Read the existing settings file** to understand current configuration +2. **Preserve existing settings** - only add/modify what the user requested +3. 
**Validate JSON syntax** before writing to avoid breaking the editor + +## Modifying Settings + +### Step 1: Read Current Settings + +```typescript +// Read the settings file first +const settingsPath = "~/Library/Application Support/Cursor/User/settings.json"; +// Use the Read tool to get current contents +``` + +### Step 2: Identify the Setting to Change + +Common setting categories: +- **Editor**: `editor.fontSize`, `editor.tabSize`, `editor.wordWrap`, `editor.formatOnSave` +- **Workbench**: `workbench.colorTheme`, `workbench.iconTheme`, `workbench.sideBar.location` +- **Files**: `files.autoSave`, `files.exclude`, `files.associations` +- **Terminal**: `terminal.integrated.fontSize`, `terminal.integrated.shell.*` +- **Cursor-specific**: Settings prefixed with `cursor.` or `aipopup.` + +### Step 3: Update the Setting + +When modifying settings.json: +1. Parse the existing JSON (handle comments - VSCode settings support JSON with comments) +2. Add or update the requested setting +3. Preserve all other existing settings +4. Write back with proper formatting (2-space indentation) + +### Example: Changing Font Size + +If user says "make the font bigger": + +```json +{ + "editor.fontSize": 16 +} +``` + +### Example: Enabling Format on Save + +If user says "format my code when I save": + +```json +{ + "editor.formatOnSave": true +} +``` + +### Example: Changing Theme + +If user says "use dark theme" or "change my theme": + +```json +{ + "workbench.colorTheme": "Default Dark Modern" +} +``` + +## Important Notes + +1. **JSON with Comments**: VSCode/Cursor settings.json supports comments (`//` and `/* */`). When reading, be aware comments may exist. When writing, preserve comments if possible. + +2. **Restart May Be Required**: Some settings take effect immediately, others require reloading the window or restarting Cursor. Inform the user if a restart is needed. + +3. 
**Backup**: For significant changes, consider mentioning the user can undo via Ctrl/Cmd+Z in the settings file or by reverting git changes if tracked.
+
+4. **Workspace vs User Settings**:
+   - User settings (what this skill covers): Apply globally to all projects
+   - Workspace settings (`.vscode/settings.json`): Apply only to the current project
+
+5. **Commit Attribution**: When the user asks about commit attribution, clarify whether they want to edit the **CLI agent** or the **IDE agent**. For the CLI agent, modify `~/.cursor/cli-config.json`. For the IDE agent, it is controlled from the UI at **Cursor Settings > Agent > Attribution** (not settings.json).
+
+## Common User Requests → Settings
+
+| User Request | Setting |
+|--------------|---------|
+| "bigger/smaller font" | `editor.fontSize` |
+| "change tab size" | `editor.tabSize` |
+| "format on save" | `editor.formatOnSave` |
+| "word wrap" | `editor.wordWrap` |
+| "change theme" | `workbench.colorTheme` |
+| "hide minimap" | `editor.minimap.enabled` |
+| "auto save" | `files.autoSave` |
+| "line numbers" | `editor.lineNumbers` |
+| "bracket matching" | `editor.bracketPairColorization.enabled` |
+| "cursor style" | `editor.cursorStyle` |
+| "smooth scrolling" | `editor.smoothScrolling` |
+
+## Workflow
+
+1. Read the settings file for the current OS (see the Settings File Location table above — e.g. %APPDATA%\Cursor\User\settings.json on Windows)
+2. Parse the JSON content
+3. Add/modify the requested setting(s)
+4. Write the updated JSON back to the file
+5. Inform the user the setting has been changed and whether a reload is needed
diff --git a/.cursorignore b/.cursorignore
index 088ffef..198ccfe 100644
--- a/.cursorignore
+++ b/.cursorignore
@@ -1,3 +1,53 @@
-# Reduce agent indexing noise
+# Reduce Cursor/agent indexing noise (semantic search quality).
+# Align with .gitignore where practical.
ContextStream CLI ingest may use its own +# filters; after changing this file, run npm run contextstream:ingest:force so +# the remote index matches (see .cursor/skills/contextstream/SKILL.md). + docs/marketing/ +# Plans and local IDE caches (high keyword noise, not runtime source) +.cursor/plans/ +.cursor/worktrees/ +.cursor/debug-*.log + +# Claude Code local noise (committed .claude/*.json at repo root stay visible) +.claude/projects/ +.claude/file-history/ + +# Build and package outputs +bin/ +obj/ +**/bin/ +**/obj/ +[Dd]ebug/ +[Rr]elease/ +packages/ +.vs/ + +# Node and worker artifacts +node_modules/ +scripts/obs-bridge/node_modules/ +worker/node_modules/ +worker/.wrangler/ + +# ContextStream client-local (not indexed usefully) +.contextstream/ + +# Temp / one-off tooling +temp-mcp-client/ +temp-rpc-docs/ + +# Local observability Docker data +observability/local/.storage/ + +# One-off MCP payload dumps (repo root) +/_ctx_*.json +/_grafana_create.json +/_mcp_memory_args.json +/_mem_update_arch.json +/_push_pf.json +/_test_list.json +/_tmp_arch.json + +# Logs +debug-*.log diff --git a/.env.example b/.env.example index 497f6a1..f4194fe 100644 --- a/.env.example +++ b/.env.example @@ -1,5 +1,11 @@ # SimSteward plugin/dashboard local environment template # Copy to ".env" for local testing only. Do NOT commit real secrets. +# +# Loaded automatically via scripts/load-dotenv.ps1 (Import-DotEnv) by: +# deploy.ps1, run-simhub-local-observability.ps1, send-deploy-loki-marker.ps1, +# poll-loki.ps1, validate-grafana-logs.ps1, grafana-bootstrap.ps1, publish-dashboards.ps1 +# Use a different file: .\deploy.ps1 -EnvFile C:\path\to\secrets.env (or repo-relative, e.g. .env.prod) +# Second file observability/local/.env.observability.local is merged after the primary file if present (e.g. LOKI_PUSH_TOKEN). 
# Plugin WebSocket runtime SIMSTEWARD_WS_PORT=19847 @@ -19,7 +25,9 @@ SEEK_BEFORE_RECORD=1 RECORD_DURATION_SEC=30 # Grafana Loki (see docs/GRAFANA-LOGGING.md) -SIMSTEWARD_LOKI_URL=https://logs-prod-us-east-0.grafana.net +# deploy.ps1 posts deploy_marker: defaults to http://localhost:3100 if URL unset, or if URL is *.grafana.net without SIMSTEWARD_LOKI_USER+TOKEN (avoids 530 from template .env). +# Auth: Basic USER+TOKEN for Grafana Cloud; Bearer LOKI_PUSH_TOKEN for local gateway :3500; direct :3100 needs no auth. +# SIMSTEWARD_LOKI_URL=https://logs-prod-us-east-0.grafana.net SIMSTEWARD_LOKI_USER= # example: 1484376 SIMSTEWARD_LOKI_TOKEN= # example: glc_... SIMSTEWARD_LOG_ENV=production # label: production | local @@ -59,6 +67,23 @@ SIMSTEWARD_LOG_DEBUG= # set to 1 for local debug mode only # GRAFANA_ADMIN_PASSWORD=admin # GRAFANA_LOKI_DATASOURCE_UID=loki_local # provisioning default; override if renamed +# --- OTLP / Prometheus (local Docker stack; see docs/observability-local.md) --- +# Plugin sends OpenTelemetry metrics when one of these is set (before SimHub starts): +# OTEL_EXPORTER_OTLP_ENDPOINT=http://127.0.0.1:4317 +# SIMSTEWARD_OTLP_ENDPOINT=http://127.0.0.1:4317 +# For OTLP HTTP on 4318: also set OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf +# Prometheus UI (host): http://localhost:9090 — Grafana datasource UID: prometheus_local + +# --- Sentry (error tracking + release tracking) --- +# 3 separate Sentry projects — each component has its own DSN: +# simhub-plugin: C# plugin (DSN used in plugin Init, configurable via env var) +# index-dashboard: Main dashboard (DSN hardcoded in index.html) +# test-dashboard: Data capture suite (DSN hardcoded in data-capture-suite.html) +# Plugin DSN (overrides hardcoded default): +# SIMSTEWARD_SENTRY_DSN=https://ab2d0a6f7cd97033a46f4fa7d90dabab@o4511097126780928.ingest.us.sentry.io/4511102961319936 +# Auth token for deploy.ps1 release/deploy tracking (Organization:Read, Release:Admin scopes): +# SENTRY_AUTH_TOKEN=sntrys_... 
+ # --- Local observability stack (observability/local/docker-compose.yml) --- -# GRAFANA_STORAGE_PATH= # e.g. S:/sim-steward-grafana-storage (Loki, Grafana, local stack data) +# GRAFANA_STORAGE_PATH= # e.g. S:/sim-steward-grafana-storage (Loki, Grafana, Prometheus TSDB, local stack data) # LOKI_PUSH_TOKEN= # optional; if Loki gateway requires auth diff --git a/.github/workflows/secrets-scan.yml b/.github/workflows/secrets-scan.yml new file mode 100644 index 0000000..e8380c1 --- /dev/null +++ b/.github/workflows/secrets-scan.yml @@ -0,0 +1,29 @@ +# Reproduce locally: npm run secrets:lint ; npm run secrets:gitleaks (Docker required for Gitleaks script). +name: Secrets scan + +on: + push: + branches: [main, master] + pull_request: + +jobs: + secretlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: "20" + cache: npm + - run: npm ci + - run: npm run secrets:lint + + gitleaks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: gitleaks/gitleaks-action@v2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index c8e13f0..febfe10 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ Thumbs.db .env .env.* !.env.example +!.env.observability.example # Node node_modules/ diff --git a/.mcp.json b/.mcp.json index f0ccf50..ac4b589 100644 --- a/.mcp.json +++ b/.mcp.json @@ -1,33 +1,14 @@ { "mcpServers": { - "ollama": { - "command": "node", - "args": [ - "C:\\Users\\winth\\AppData\\Roaming\\npm\\node_modules\\@muhammadmehdi\\ollama-mcp-server\\dist\\index.js" - ], - "env": { - "OLLAMA_BASE_URL": "http://localhost:11434", - "OLLAMA_NUM_CTX": "131072" - } - }, - "sentry": { - "command": "npx", - "args": [ - "-y", - "@sentry/mcp-server@latest" - ] - }, "contextstream": { - "command": "npx", "args": [ - "-y", - "envmcp", - "--env-file", - ".env", + "/c", "contextstream-mcp" ], + "command": "cmd", "env": { 
"CONTEXTSTREAM_ALLOW_HEADER_AUTH": "false", + "CONTEXTSTREAM_API_KEY": "cbiq_SlhoP2xlfUE-jzVnFbeOBFSoaPPvf1aFET6TI3P9DXg", "CONTEXTSTREAM_API_URL": "https://api.contextstream.io", "CONTEXTSTREAM_AUTO_HIDE_INTEGRATIONS": "true", "CONTEXTSTREAM_CONSOLIDATED": "true", @@ -37,14 +18,29 @@ "CONTEXTSTREAM_LOG_LEVEL": "quiet", "CONTEXTSTREAM_OUTPUT_FORMAT": "compact", "CONTEXTSTREAM_PROGRESSIVE_MODE": "false", + "CONTEXTSTREAM_PROJECT_ID": "253a5461-8d7a-410e-a4a8-c8923e49eddc", "CONTEXTSTREAM_ROUTER_MODE": "false", - "CONTEXTSTREAM_SEARCH_LIMIT": "16", - "CONTEXTSTREAM_SEARCH_MAX_CHARS": "1024", + "CONTEXTSTREAM_SEARCH_LIMIT": "10", + "CONTEXTSTREAM_SEARCH_MAX_CHARS": "800", "CONTEXTSTREAM_SHOW_TIMING": "false", "CONTEXTSTREAM_TOOLSET": "standard", "CONTEXTSTREAM_TRANSCRIPTS_ENABLED": "false", - "CONTEXTSTREAM_USER_AGENT": "contextstream-mcp-rust/0.1.74" + "CONTEXTSTREAM_USER_AGENT": "contextstream-mcp-rust/0.1.77", + "CONTEXTSTREAM_WORKSPACE_ID": "f5c5b873-acfb-47ec-b93b-4acabfa78a8b" + } + }, + "ollama": { + "args": [ + "/c", + "npx", + "-y", + "@muhammadmehdi/ollama-mcp-server" + ], + "command": "cmd", + "env": { + "OLLAMA_BASE_URL": "http://localhost:11434", + "OLLAMA_NUM_CTX": "131072" } } } -} +} \ No newline at end of file diff --git a/.secretlintignore b/.secretlintignore new file mode 100644 index 0000000..690cf60 --- /dev/null +++ b/.secretlintignore @@ -0,0 +1,9 @@ +node_modules/ +bin/ +obj/ +packages/ +.vs/ +**/*.dll +**/*.pdb +**/*.exe +package-lock.json diff --git a/.secretlintrc.json b/.secretlintrc.json new file mode 100644 index 0000000..7a1a5df --- /dev/null +++ b/.secretlintrc.json @@ -0,0 +1,7 @@ +{ + "rules": [ + { + "id": "@secretlint/secretlint-rule-preset-recommend" + } + ] +} diff --git a/.superpowers/brainstorm/99-1774407490/.server-info b/.superpowers/brainstorm/99-1774407490/.server-info new file mode 100644 index 0000000..cc45ef4 --- /dev/null +++ b/.superpowers/brainstorm/99-1774407490/.server-info @@ -0,0 +1 @@ 
+{"type":"server-started","port":61964,"host":"127.0.0.1","url_host":"localhost","url":"http://localhost:61964","screen_dir":"C:\\Users\\winth\\dev\\sim-steward\\simhub-plugin/.superpowers/brainstorm/99-1774407490"} diff --git a/.superpowers/brainstorm/99-1774407490/.server.pid b/.superpowers/brainstorm/99-1774407490/.server.pid new file mode 100644 index 0000000..3ad5abd --- /dev/null +++ b/.superpowers/brainstorm/99-1774407490/.server.pid @@ -0,0 +1 @@ +99 diff --git a/.superpowers/brainstorm/99-1774407490/test-selection-current.html b/.superpowers/brainstorm/99-1774407490/test-selection-current.html new file mode 100644 index 0000000..17bb7ad --- /dev/null +++ b/.superpowers/brainstorm/99-1774407490/test-selection-current.html @@ -0,0 +1,64 @@ +

Step 3: Test Selection — What's Wrong & What Could Be

+

Current design vs 3 redesign approaches. Click to select your preferred direction.

+ +
+

Current Problems

+
+
+ Flat & Repetitive +

Every test card looks identical. No visual hierarchy. Groups are just text headers with checkboxes. The eye has nowhere to land.

+
+
+ Filter Bar Is Fake +

Chips just scroll to groups — they don't actually filter. Text filter hides cards but gives no count feedback. No way to filter by status or dependency.

+
+
+ No Selection Summary +

You toggle checkboxes but there's no "8 of 13 selected" or visual summary of what you're about to run. You have to scroll and count.

+
+
+ Dependencies Invisible +

T7 depends on T0 but nothing shows that visually. The T7 checkbox just silently disables. The feature flag for T_60Hz is hidden.

+
+
+
+ +
+
+
A
+
+

Grid Cards with Group Lanes

+

Replace the stacked list with a horizontal grid. Each group is a visual "lane" with a colored left accent matching the group's identity. Tests become compact cards in a 2- or 3-column grid within each lane. A sticky selection summary bar at the top shows "8/13 selected" with mini chips for each test. Dependencies shown as thin connecting lines between cards.

+
+

Pros

  • Dense — see all tests without scrolling
  • Groups feel distinct and visual
  • Summary bar gives instant feedback
+

Cons

  • Cards must be compact (less description space)
  • Dependency lines may feel noisy with 13 tests
+
+
+
+ +
+
B
+
+

Compact Tag Table + Selection Tray

+

A dense table layout — one row per test with columns for ID, name, group tag, dependency badge, and a toggle switch (not checkbox). A floating "selection tray" at the bottom collects selected tests as removable chips with drag reordering. Group filtering via colored tag pills in the header. The whole thing fits in one viewport.

+
+

Pros

  • Maximum density — all 13 tests visible at once
  • Tray gives clear "what will run" preview
  • Table is scannable and sortable
+

Cons

  • Less visual personality — table feels utilitarian
  • Less room for test descriptions
+
+
+
+ +
+
C
+
+

Interactive Test Map

+

A visual node map inspired by pipeline/DAG views. Each test is a node with its group color. Dependency arrows connect T0→T7. Nodes have toggle states (selected = filled, deselected = ghost). Groups are soft background regions. A sticky summary strip shows selection count + estimated run time. Clicking a group region toggles all its nodes. Search dims non-matching nodes.

+
+

Pros

  • Dependencies are first-class visual citizens
  • Most distinctive and memorable design
  • Natural for a test pipeline mental model
+

Cons

  • Most complex to implement
  • Layout may be harder to maintain as tests grow
+
+
+
+
+ +

Click your preferred approach, or tell me in the terminal if you want a hybrid.

diff --git a/.superpowers/brainstorm/99-1774407490/test-selection-expanded.html b/.superpowers/brainstorm/99-1774407490/test-selection-expanded.html new file mode 100644 index 0000000..3cdbf55 --- /dev/null +++ b/.superpowers/brainstorm/99-1774407490/test-selection-expanded.html @@ -0,0 +1,42 @@ +

Step 3: Test Selection — All Options Including D

+

Four directions. Click your pick — or describe a hybrid in the terminal.

+ +
+
+
A
+
+

Grid Cards with Group Lanes

+

Horizontal grid. Groups as colored lanes. Tests as compact cards in 2-3 columns. Sticky selection summary bar at top. Dependency lines between cards.

+
+
+ +
+
B
+
+

Compact Tag Table + Selection Tray

+

Dense table — one row per test. Toggle switches, group tag pills, dependency badges. Floating selection tray at bottom with removable chips. All 13 tests visible in one viewport.

+
+
+ +
+
C
+
+

Interactive Test Map (DAG)

+

Pipeline node map. Tests as toggleable nodes with group colors. Dependency arrows (T0→T7). Background regions per group. Sticky summary strip. Most visual, most complex.

+
+
+ +
+
D
+
+

Kanban-Style Group Columns

+

Each test group becomes a vertical column side-by-side — like a Kanban board. Column headers carry the group name + color accent + select-all toggle. Tests are stacked cards within each column. Dependencies shown as a subtle dashed connector between columns. A persistent selection bar spans the full width above the columns: "9 of 13 selected — Incident Capture (2) · SDK Data (3) · Camera (2) · Session (2)". The whole layout is horizontally scrollable if needed but aims to fit in viewport. Group columns have distinct radial gradient backgrounds (matching the step-bg pattern from the rest of the page).

+
+

Pros

  • Groups are visually parallel and scannable at a glance
  • Natural mental model — "columns of work"
  • Fits the page's existing gradient language
  • Easy to see what's selected per group
+

Cons

  • Horizontal space needed — may compress on narrow screens
  • Uneven column heights (1 test vs 3 tests)
+
+
+
+
+ +

Or tell me what you'd combine — e.g. "D's columns but with A's summary bar and C's dependency arrows"

diff --git a/.superpowers/brainstorm/99-1774407490/test-selection-sentry.html b/.superpowers/brainstorm/99-1774407490/test-selection-sentry.html new file mode 100644 index 0000000..1efdc51 --- /dev/null +++ b/.superpowers/brainstorm/99-1774407490/test-selection-sentry.html @@ -0,0 +1,146 @@ +

Test Selection — Sentry-Inspired Redesign

+

Sentry UI patterns adapted for test selection. Which combination resonates?

+ +
+

Sentry Patterns We Can Steal

+
+
+ Issue Stream +

Each test as a row with inline status pill, group tag, dependency badge, and a mini sparkline showing last-5-runs pass/fail history. Checkbox on hover, not always visible.

+
+
+ Tag Facets Sidebar +

Instead of filter chips, a left sidebar with clickable facets: Group (6), Dependency (2), Has Feature Flag (1). Click a facet value to filter the stream. Sentry's signature pattern.

+
+
+ Query Bar +

A Sentry-style composable query bar at top: group:camera is:selected has:dependency. Tags autocomplete. Feels powerful and searchable.

+
+
+
+ +
+

Mockup: The Sentry-Flavored Test Stream

+
+
Step 3 — Test Selection (Sentry-inspired)
+
+ + +
+ +
+ Incident 2 + SDK 3 + Camera 2 + Session 2 +
+
+ + +
+
+ 🔍 + is:selected + Filter by name, group, dependency... +
+
+ +
+ +
+
Group
+
+
Incident 2
+
SDK Data 3
+
Camera 2
+
Session 3
+
Discovery 1
+
High-Rate 1
+
+
Dependency
+
+
None 11
+
T0 1
+
Feature flag 1
+
+
+ + +
+ +
+ + T0 + Ground Truth Capture + incident + no deps +
+ +
+ + T1 + Speed Sweep Detection + incident + no deps +
+ +
+ + T2 + Variable Inventory + sdk + no deps +
+ +
+ + T7 + Incident Re-Seek + session + ← T0 +
+ +
+ + T_60Hz + 60 Hz Telemetry Dump + high-rate + ⚑ flag +
+
+ 8 more tests
+
+
+
+
+
+ +

Pick your direction

+

These can be combined. The Sentry patterns work with any of the original layouts.

+ +
+
+
E
+
+

Sentry Issue Stream (shown above)

+

Facet sidebar + query bar + flat stream rows. Most Sentry-native. Dense, scannable, powerful filtering. Selection summary bar at top. Dependencies and feature flags as inline badges. Group identity via colored tag pills. This is what Sentry Issues actually looks like — your future Sentry integration will feel seamless.

+
+
+ +
+
F
+
+

Sentry Stream + Grid Hybrid

+

Facet sidebar + query bar, but tests as mini cards in a grid instead of flat rows. 2-3 column card grid grouped by color. More visual than pure stream, keeps the powerful Sentry filtering. Selection summary at top. Best of both worlds if you want more visual personality than rows.

+
+
+ +
+
G
+
+

Sentry Query Bar + Kanban Columns

+

Query bar at top, groups as side-by-side columns below. Drops the facet sidebar (columns ARE the facets). Each column header has the group tag color + select-all. Sentry query bar for search/filter. Merges Option D with Sentry's query language. Dependencies as cross-column connectors.

+
+
+
diff --git a/CLAUDE.md b/CLAUDE.md index fd5b2d4..9db9391 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,83 +1,24 @@ + + # Workspace: sim-steward # Project: simhub-plugin # Workspace ID: f5c5b873-acfb-47ec-b93b-4acabfa78a8b -# ContextStream Rules -**MANDATORY STARTUP:** On the first message of EVERY session call `mcp__contextstream__init(...)` then `mcp__contextstream__context(user_message="...")`. On subsequent messages, call `mcp__contextstream__context(user_message="...")` first by default. A narrow bypass is allowed only for immediate read-only ContextStream calls when prior context is still fresh and no state-changing tool has run. - -## Quick Rules +# Claude Code Instructions | Message | Required | |---------|----------| -| **First message in session** | `mcp__contextstream__init(...)` → `mcp__contextstream__context(user_message="...")` BEFORE any other tool | -| **Subsequent messages (default)** | `mcp__contextstream__context(user_message="...")` FIRST, then other tools (narrow read-only bypass allowed when context is fresh + state is unchanged) | -| **Before file search** | `mcp__contextstream__search(mode="...", query="...")` BEFORE Glob/Grep/Read | +| **1st message** | `mcp__contextstream__init()` → `mcp__contextstream__context(user_message="...")` | +| **Subsequent messages (default)** | `mcp__contextstream__context(user_message="...")` FIRST (narrow read-only bypass when context is fresh and no state-changing tool has run) | +| **Before file search** | `mcp__contextstream__search(mode="auto")` BEFORE Glob/Grep/Read/Explore/Task/EnterPlanMode | -## Detailed Rules -**Read-only examples** (default: call `mcp__contextstream__context(...)` first; narrow bypass only for immediate read-only ContextStream calls when context is fresh and no state-changing tool has run): `mcp__contextstream__workspace(action="list"|"get"|"create")`, 
`mcp__contextstream__memory(action="list_docs"|"list_events"|"list_todos"|"list_tasks"|"list_transcripts"|"list_nodes"|"decisions"|"get_doc"|"get_event"|"get_task"|"get_todo"|"get_transcript")`, `mcp__contextstream__session(action="get_lessons"|"get_plan"|"list_plans"|"recall")`, `mcp__contextstream__help(action="version"|"tools"|"auth")`, `mcp__contextstream__project(action="list"|"get"|"index_status")`, `mcp__contextstream__reminder(action="list"|"active")`, any read-only data query - -**Common queries — use these exact tool calls:** -- "list lessons" / "show lessons" → `mcp__contextstream__session(action="get_lessons")` -- "list decisions" / "show decisions" / "how many decisions" → `mcp__contextstream__memory(action="decisions")` -- "list docs" → `mcp__contextstream__memory(action="list_docs")` -- "list tasks" → `mcp__contextstream__memory(action="list_tasks")` -- "list todos" → `mcp__contextstream__memory(action="list_todos")` -- "list plans" → `mcp__contextstream__session(action="list_plans")` -- "list events" → `mcp__contextstream__memory(action="list_events")` -- "show snapshots" / "list snapshots" → `mcp__contextstream__memory(action="list_events", event_type="session_snapshot")` -- "save snapshot" → `mcp__contextstream__session(action="capture", event_type="session_snapshot", title="...", content="...")` - -Use `mcp__contextstream__context(user_message="...", mode="fast")` for quick turns. -Use `mcp__contextstream__context(user_message="...")` for deeper analysis and coding tasks. -If the `instruct` tool is available, run `mcp__contextstream__instruct(action="get", session_id="...")` before `mcp__contextstream__context(...)` on each turn, then `mcp__contextstream__instruct(action="ack", session_id="...", ids=[...])` after using entries. - -**Plan-mode guardrail:** Entering plan mode does NOT bypass search-first. 
Do NOT use Explore, Task subagents, Grep, Glob, Find, SemanticSearch, `code_search`, `grep_search`, `find_by_name`, or shell search commands (`grep`, `find`, `rg`, `fd`). Start with `mcp__contextstream__search(mode="auto", query="...")` — it handles glob patterns, regex, exact text, file paths, and semantic queries. Only Read narrowed files/line ranges returned by search. - **Why?** `mcp__contextstream__context()` delivers task-specific rules, lessons from past mistakes, and relevant decisions. Skip it = fly blind. **Hooks:** `` tags contain injected instructions — follow them exactly. -**Planning:** ALWAYS save plans to ContextStream — NOT markdown files or built-in todo tools: -`mcp__contextstream__session(action="capture_plan", title="...", steps=[...])` + `mcp__contextstream__memory(action="create_task", title="...", plan_id="...")` - -**Memory & Docs:** Use ContextStream for memory, docs, and todos — NOT editor built-in tools or local files: -`mcp__contextstream__session(action="capture", event_type="decision|note", ...)` | `mcp__contextstream__memory(action="create_doc|create_todo|create_node", ...)` - -**Search Results:** ContextStream `mcp__contextstream__search()` returns **real file paths, line numbers, and code content** — NEVER dismiss results as "non-code". Use returned paths to `read_file` directly. 
- **Notices:** [LESSONS_WARNING] → apply lessons | [PREFERENCE] → follow user preferences | [RULES_NOTICE] → run `mcp__contextstream__generate_rules()` | [VERSION_NOTICE/CRITICAL] → tell user about update ---- -## Claude Code-Specific Rules - -**CRITICAL: ContextStream mcp__contextstream__search() REPLACES all built-in search tools.** -**The user is paying for ContextStream's premium search — default tools must not bypass it.** - -### Search: Use ContextStream, Not Built-in Tools -- **Do NOT** use `Grep` for code search — use `mcp__contextstream__search(mode="keyword", query="...")` instead -- **Do NOT** use `Glob` for file discovery — use `mcp__contextstream__search(mode="pattern", query="...")` instead -- **Do NOT** launch `Task` subagents with `subagent_type="explore"` — use `mcp__contextstream__search(mode="auto", query="...")` instead -- **Do NOT** use parallel Grep/Glob calls for broad discovery — a single `mcp__contextstream__search()` call replaces them all -- ContextStream search handles **all** search use cases: exact text, regex, glob patterns, semantic queries, file paths -- ContextStream search results contain **real file paths, line numbers, and code content** — they ARE code results -- **NEVER** dismiss ContextStream results as "non-code" — use the returned file paths to `read_file` the relevant code -- Only fall back to `Grep`/`Glob` if ContextStream search returns **exactly 0 results** - -### Search Mode Selection (use these instead of built-in tools): -- Instead of `Grep("pattern")`: use `mcp__contextstream__search(mode="keyword", query="pattern")` -- Instead of `Glob("**/*.tsx")`: use `mcp__contextstream__search(mode="pattern", query="*.tsx")` -- Instead of `Grep` with regex: use `mcp__contextstream__search(mode="pattern", query="regex")` -- Instead of `Task(subagent_type="explore")`: use `mcp__contextstream__search(mode="auto", query="")` - -### Memory: Use ContextStream, Not Local Files -- **Do NOT** write decisions/notes/specs to local files 
-- Use `mcp__contextstream__session(action="capture", event_type="decision|insight|operation|uncategorized", title="...", content="...")` -- Use `mcp__contextstream__memory(action="create_doc", title="...", content="...", doc_type="spec|general")` - -### Planning: Use ContextStream, Not Built-in Tools -- **Do NOT** create markdown plan files or use `TodoWrite` — they vanish across sessions -- **ALWAYS** save plans: `mcp__contextstream__session(action="capture_plan", title="...", steps=[...])` -- **ALWAYS** create tasks: `mcp__contextstream__memory(action="create_task", title="...", plan_id="...")` - +v0.4.65 + diff --git a/README.md b/README.md index 67a280a..84eed62 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ A **SimHub plugin + browser dashboard** for structured iRacing replay review. In | **Driver standings** | Position/car/driver/incident count, collapsible | | **Telemetry strip** | Throttle, brake, steering wheel (real data from plugin) | | **Selected Incident Panel** | Camera group dropdown (`cameraGroups` from plugin), ▶ Capture (`capture_incident`: pre-roll, optional camera, 1× speed), prev/next within filtered list | -| **Observability** | Structured logs → Grafana Loki (`SIMSTEWARD_LOKI_URL`); `capture_incident` includes correlation fields on `action_result`; re-capture confirms before sending (Loki is append-only) | +| **Observability** | Structured logs → Grafana Loki (`SIMSTEWARD_LOKI_URL`); optional **OTLP metrics** → local OpenTelemetry Collector → **Prometheus** (`OTEL_EXPORTER_OTLP_ENDPOINT`, **docs/observability-local.md**); `capture_incident` includes correlation fields on `action_result`; re-capture confirms before sending (Loki is append-only) | | **Replay incident index (iRacing replay)** | WebSocket actions `replay_incident_index_build` (`start` / `cancel`), `replay_incident_index_seek` (JSON `sessionTimeMs`, optional `sessionNum`), `replay_incident_index_record` (`on` / `off` — 60Hz NDJSON under 
`%LocalAppData%\SimSteward\replay-incident-index\record-samples\`). IRSDKSharper 60Hz poll, 16× fast-forward, detection → JSON index on disk (`...\{subSessionId}.json`, TR-020 v1). **Dashboard:** `http://:8888/Web/sim-steward-dash/replay-incident-index.html` (summary, sortable table, build/record, seeks); main dash links to it. Spec: [docs/IRACING-REPLAY-INCIDENT-INDEX-REQUIREMENTS.md](docs/IRACING-REPLAY-INCIDENT-INDEX-REQUIREMENTS.md). | **North-star / gaps still open:** true plugin-side **YAML scan** (session walk still uses the leaderboard frame list), **scrub bar** seek (PoC / toast only), **plugin-owned `suggestedCamera`**, **dual-view** capture, **OBS** integration. See [docs/PRODUCT-FLOW.md](docs/PRODUCT-FLOW.md) and [docs/DATA-ROUTING-OBSERVABILITY.md](docs/DATA-ROUTING-OBSERVABILITY.md) for what belongs in Loki vs a future metrics path. @@ -42,6 +42,8 @@ SimSteward.Plugin (C# / .NET 4.8 / SimHub) │ └──→ Grafana Loki (optional) plugin → HTTPS POST to SIMSTEWARD_LOKI_URL (single endpoint) + └──→ OTLP metrics (optional) + plugin → localhost:4317 → OpenTelemetry Collector → Prometheus (:9090) local Docker stack: observability/local/ ``` @@ -64,7 +66,7 @@ docs/ Documentation (start with docs/README.md) DATA-ROUTING-OBSERVABILITY.md Events vs high-rate telemetry (Loki vs OTel/metrics) TROUBLESHOOTING.md Runtime issues, deploy, logs -observability/local/ Local Grafana + Loki Docker stack +observability/local/ Local Grafana + Loki + Prometheus + OTel Collector Docker stack tests/ PowerShell integration tests scripts/ obs-bridge, Loki helpers, deploy utilities deploy.ps1 Build + deploy to local SimHub diff --git a/deploy.ps1 b/deploy.ps1 index 5e1b35d..72ef38d 100644 --- a/deploy.ps1 +++ b/deploy.ps1 @@ -1,18 +1,130 @@ -# Deploy Sim Steward plugin (skeleton) to local SimHub. -# Run from plugin/: .\deploy.ps1 +# Deploy Sim Steward plugin to local SimHub. 
+# Copies: plugin DLLs + every *.html (and README.txt) from src\SimSteward.Dashboard -> SimHub\Web\sim-steward-dash\ +# Run from plugin/: .\deploy.ps1 [-EnvFile path\to\secrets.env] # Requires: SimHub installed; place SimHub.Plugins.dll and GameReaderCommon.dll in lib\SimHub\ (or script copies from SimHub path). +param( + [string]$EnvFile = "" +) + $ErrorActionPreference = "Stop" $PluginRoot = $PSScriptRoot +$script:deployStartUtc = [DateTimeOffset]::UtcNow + +# Load env: default .env + optional observability merge, or -EnvFile (absolute or repo-relative) + same merge. +$loadDotenv = Join-Path $PluginRoot "scripts\load-dotenv.ps1" +if (Test-Path $loadDotenv) { + . $loadDotenv + $dotPaths = Resolve-SimStewardEnvPaths -RepoRoot $PluginRoot -EnvFile $EnvFile + Import-DotEnv $dotPaths + if (-not [string]::IsNullOrWhiteSpace($EnvFile)) { + Write-Host "Loaded secrets from -EnvFile $EnvFile (+ observability local merge if present)." + } +} + +# Deploy marker -> Grafana Explore (Loki). Default local stack when unset; avoid template Cloud URL without creds (530). +$localLoki = "http://localhost:3100" +$deployMarkerLocal = $false +if ([string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOKI_URL)) { + $env:SIMSTEWARD_LOKI_URL = $localLoki + $deployMarkerLocal = $true + Write-Host "Loki deploy log: SIMSTEWARD_LOKI_URL was unset - using $localLoki (start stack: npm run obs:up)." +} elseif ($env:SIMSTEWARD_LOKI_URL -match 'grafana\.net') { + $hasCloudBasic = -not [string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOKI_USER) -and -not [string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOKI_TOKEN) + if (-not $hasCloudBasic) { + Write-Warning "SIMSTEWARD_LOKI_URL is Grafana Cloud but SIMSTEWARD_LOKI_USER / SIMSTEWARD_LOKI_TOKEN missing - using $localLoki for deploy marker. Set both for Cloud." 
+ $env:SIMSTEWARD_LOKI_URL = $localLoki + $deployMarkerLocal = $true + } +} +if ([string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOG_ENV)) { $env:SIMSTEWARD_LOG_ENV = "local" } +if ($deployMarkerLocal) { $env:SIMSTEWARD_LOG_ENV = "local" } + +# ── Loki push helper (fire-and-forget, never fatal) ────────────────────────── +$script:lokiHeaders = @{ 'Content-Type' = 'application/json' } +$lokiUser = $env:SIMSTEWARD_LOKI_USER +$lokiPass = $env:SIMSTEWARD_LOKI_TOKEN +$gatewayToken = $env:LOKI_PUSH_TOKEN +if (-not [string]::IsNullOrWhiteSpace($lokiUser) -and -not [string]::IsNullOrWhiteSpace($lokiPass)) { + $pair = [Text.Encoding]::ASCII.GetBytes(("{0}:{1}" -f $lokiUser.Trim(), $lokiPass.Trim())) + $script:lokiHeaders['Authorization'] = 'Basic ' + [Convert]::ToBase64String($pair) +} elseif (-not [string]::IsNullOrWhiteSpace($gatewayToken)) { + $script:lokiHeaders['Authorization'] = 'Bearer ' + $gatewayToken.Trim() +} +$script:lokiPushUri = $env:SIMSTEWARD_LOKI_URL.TrimEnd('/') + '/loki/api/v1/push' + +function Push-LokiEvent { + param( + [string]$Event, + [string]$Level = 'INFO', + [string]$Message = '', + [hashtable]$Fields = @{} + ) + try { + $tsNs = [DateTimeOffset]::UtcNow.ToUnixTimeMilliseconds() * 1000000 + $body = [ordered]@{ + event = $Event + level = $Level + message = $Message + machine = $env:COMPUTERNAME + } + foreach ($k in $Fields.Keys) { $body[$k] = $Fields[$k] } + $line = $body | ConvertTo-Json -Compress -Depth 5 + $stream = [ordered]@{ + stream = @{ + app = 'sim-steward' + env = $env:SIMSTEWARD_LOG_ENV + component = 'local-deployment' + level = $Level + } + values = @( , @( [string]$tsNs, $line ) ) + } + $payload = ([ordered]@{ streams = @( $stream ) }) | ConvertTo-Json -Depth 20 -Compress + Invoke-RestMethod -Uri $script:lokiPushUri -Method Post -Headers $script:lokiHeaders -Body $payload -TimeoutSec 5 | Out-Null + } catch { + # Non-fatal: deploy must not fail because Loki is down + } +} + +# Resolve git info for deploy context +$gitBranch = '' +$gitSha = 
'' +try { + $gitBranch = (& git -C $PluginRoot rev-parse --abbrev-ref HEAD 2>$null) + $gitSha = (& git -C $PluginRoot rev-parse --short HEAD 2>$null) +} catch {} + +# ── EVENT: deploy_started ──────────────────────────────────────────────────── +Push-LokiEvent 'deploy_started' 'INFO' 'Deploy script started' @{ + git_branch = $gitBranch + git_sha = $gitSha + env_file = $(if ($EnvFile) { $EnvFile } else { '.env (default)' }) + loki_url = $env:SIMSTEWARD_LOKI_URL +} $PluginDlls = @( "SimSteward.Plugin.dll", "Fleck.dll", "Newtonsoft.Json.dll", "IRSDKSharper.dll", - "YamlDotNet.dll" + "YamlDotNet.dll", + "Sentry.dll" ) +function Read-PluginDllProductVersion { + param([string]$DllPath) + try { + if (-not (Test-Path -LiteralPath $DllPath)) { return $null } + $full = (Resolve-Path -LiteralPath $DllPath).Path + return ([System.Diagnostics.FileVersionInfo]::GetVersionInfo($full)).ProductVersion + } catch { + return $null + } +} + +# Populated after DLL copy + verify (AssemblyInformationalVersion -> PE ProductVersion) +$script:SimStewardPluginVersionDeployed = $null + # ── Locate SimHub install path ─────────────────────────────────────────────── $SimHubPath = $null if ($env:SIMHUB_PATH -and (Test-Path $env:SIMHUB_PATH)) { @@ -33,11 +145,20 @@ if (-not $SimHubPath) { $SimHubPath = "C:\Program Files (x86)\SimHub" } $SimHubExe = Join-Path $SimHubPath "SimHubWPF.exe" if (-not (Test-Path $SimHubExe)) { + Push-LokiEvent 'deploy_failed' 'ERROR' 'SimHub not found' @{ simhub_path = $SimHubPath } Write-Error "SimHub not found at: $SimHubExe. Set SIMHUB_PATH to your SimHub folder." 
} Write-Host "SimHub path: $SimHubPath" + +# ── EVENT: deploy_simhub_found ─────────────────────────────────────────────── +Push-LokiEvent 'deploy_simhub_found' 'INFO' "SimHub located at $SimHubPath" @{ + simhub_path = $SimHubPath + simhub_exe = $SimHubExe +} + $DashboardSource = Join-Path $PluginRoot "src\SimSteward.Dashboard\index.html" if (-not (Test-Path $DashboardSource)) { + Push-LokiEvent 'deploy_failed' 'ERROR' 'Dashboard source not found' @{ path = $DashboardSource } Write-Error "Dashboard source not found: $DashboardSource" } # SimHub serves static HTML from Web/, not DashTemplates/ (DashTemplates requires .djson catalog) @@ -65,12 +186,26 @@ if ($sdkMissing) { # ── Build ─────────────────────────────────────────────────────────────────── Write-Host "Building..." +Push-LokiEvent 'deploy_build_started' 'INFO' 'dotnet build started' +$buildStart = Get-Date Push-Location $PluginRoot try { & dotnet build "src\SimSteward.Plugin\SimSteward.Plugin.csproj" -c Release --nologo -v q - if ($LASTEXITCODE -ne 0) { throw "Build failed with exit code $LASTEXITCODE." } + if ($LASTEXITCODE -ne 0) { + Push-LokiEvent 'deploy_build_result' 'ERROR' "Build failed (exit $LASTEXITCODE)" @{ + status = 'failed' + exit_code = $LASTEXITCODE + duration_s = [math]::Round(((Get-Date) - $buildStart).TotalSeconds, 1) + } + throw "Build failed with exit code $LASTEXITCODE." + } } finally { Pop-Location } +$buildDuration = [math]::Round(((Get-Date) - $buildStart).TotalSeconds, 1) Write-Host "Build succeeded." 
+Push-LokiEvent 'deploy_build_result' 'INFO' "Build succeeded in ${buildDuration}s" @{ + status = 'ok' + duration_s = $buildDuration +} # Resolve build output folder $outDir = Join-Path $PluginRoot "bin\Plugin" @@ -78,6 +213,7 @@ if (-not (Test-Path (Join-Path $outDir "SimSteward.Plugin.dll"))) { $outDir = Join-Path $outDir "net48" } if (-not (Test-Path (Join-Path $outDir "SimSteward.Plugin.dll"))) { + Push-LokiEvent 'deploy_failed' 'ERROR' 'Build output not found' @{ out_dir = $outDir } Write-Error "Build output not found. Expected SimSteward.Plugin.dll in bin\Plugin or bin\Plugin\net48" } @@ -88,29 +224,57 @@ if (-not $skipTests) { Where-Object { $_.Name -match "test" -or $_.Directory.Name -match "test" }) if ($testProjects.Count -gt 0) { Write-Host "Running unit tests..." + Push-LokiEvent 'deploy_tests_started' 'INFO' 'Unit tests started' @{ + test_type = 'unit' + project_count = $testProjects.Count + } + $testStart = Get-Date + $testRetried = $false Push-Location $PluginRoot try { & dotnet test --nologo -v q --no-build -c Release if ($LASTEXITCODE -ne 0) { Write-Host "Tests failed. Retrying once..." + $testRetried = $true & dotnet test --nologo -v q --no-build -c Release if ($LASTEXITCODE -ne 0) { - throw "Tests failed after retry. Deploy aborted — 100% pass required." + $testDuration = [math]::Round(((Get-Date) - $testStart).TotalSeconds, 1) + Push-LokiEvent 'deploy_tests_result' 'ERROR' 'Unit tests failed after retry' @{ + test_type = 'unit' + status = 'failed' + retried = $true + duration_s = $testDuration + } + throw "Tests failed after retry. Deploy aborted - 100% pass required." } } } finally { Pop-Location } + $testDuration = [math]::Round(((Get-Date) - $testStart).TotalSeconds, 1) Write-Host "All unit tests passed." 
+ Push-LokiEvent 'deploy_tests_result' 'INFO' "Unit tests passed in ${testDuration}s" @{ + test_type = 'unit' + status = 'ok' + retried = $testRetried + duration_s = $testDuration + } } else { Write-Host "No test projects found; skipping unit tests." } } else { Write-Host "Skipping tests (SIMSTEWARD_SKIP_TESTS=1)." + Push-LokiEvent 'deploy_tests_result' 'INFO' 'Unit tests skipped' @{ + test_type = 'unit' + status = 'skipped' + } } # ── 1. Check if SimHub is open; if open, close (force) ─────────────────────── $running = @(Get-Process -Name "SimHubWPF" -ErrorAction SilentlyContinue) if ($running.Count -gt 0) { Write-Host "SimHub is running. Closing (force)..." + Push-LokiEvent 'deploy_simhub_stopping' 'INFO' "Killing SimHub ($($running.Count) process(es))" @{ + pid_list = ($running | ForEach-Object { $_.Id }) -join ',' + } foreach ($p in $running) { try { $p.Kill() @@ -127,22 +291,30 @@ if ($running.Count -gt 0) { } $still = @(Get-Process -Name "SimHubWPF" -ErrorAction SilentlyContinue) if ($still.Count -gt 0) { + Push-LokiEvent 'deploy_failed' 'ERROR' 'SimHub did not exit after 15s' Write-Error "SimHub did not exit after 15s. Close it manually and re-run deploy." } Write-Host "SimHub closed." + Push-LokiEvent 'deploy_simhub_stopped' 'INFO' 'SimHub process stopped' } else { Write-Host "SimHub was not running." + Push-LokiEvent 'deploy_simhub_stopped' 'INFO' 'SimHub was not running' } # ── 2. Delete existing plugin files in target location ─────────────────────── Write-Host "Removing existing plugin DLLs from $SimHubPath ..." +$deletedDlls = @() foreach ($d in $PluginDlls) { $target = Join-Path $SimHubPath $d if (Test-Path $target) { Remove-Item $target -Force Write-Host " Deleted: $d" + $deletedDlls += $d } } +Push-LokiEvent 'deploy_dlls_cleaned' 'INFO' "Removed $($deletedDlls.Count) existing DLLs" @{ + deleted = $deletedDlls -join ',' +} # ── 3. 
Copy build files to target location ────────────────────────────────── function Copy-DeployDlls { @@ -155,24 +327,37 @@ function Copy-DeployDlls { Write-Host "Copying DLLs to $SimHubPath ..." Copy-DeployDlls foreach ($d in $PluginDlls) { Write-Host " $d" } +Push-LokiEvent 'deploy_dlls_copied' 'INFO' "Copied $($PluginDlls.Count) DLLs to SimHub" @{ + dlls = $PluginDlls -join ',' + target_dir = $SimHubPath +} Write-Host "Copying dashboard to $DashboardTargetDir ..." +$dashboardSrcDir = Join-Path $PluginRoot "src\SimSteward.Dashboard" $dashboardTargetFile = Join-Path $DashboardTargetDir "index.html" Copy-Item $DashboardSource $dashboardTargetFile -Force Write-Host " index.html" -$DashboardReplaySource = Join-Path $PluginRoot "src\SimSteward.Dashboard\replay-incident-index.html" -if (Test-Path $DashboardReplaySource) { - Copy-Item $DashboardReplaySource (Join-Path $DashboardTargetDir "replay-incident-index.html") -Force - Write-Host " replay-incident-index.html" +$copiedDashboards = @("index.html") +foreach ($f in Get-ChildItem -Path $dashboardSrcDir -File -ErrorAction SilentlyContinue) { + if ($f.Extension -eq ".html" -and $f.Name -ne "index.html") { + Copy-Item $f.FullName (Join-Path $DashboardTargetDir $f.Name) -Force + Write-Host " $($f.Name)" + $copiedDashboards += $f.Name + } } -$readmeSource = Join-Path $PluginRoot "src\SimSteward.Dashboard\README.txt" +$readmeSource = Join-Path $dashboardSrcDir "README.txt" if (Test-Path $readmeSource) { Copy-Item $readmeSource (Join-Path $DashboardTargetDir "README.txt") -Force Write-Host " README.txt" } if (-not (Test-Path $dashboardTargetFile)) { + Push-LokiEvent 'deploy_failed' 'ERROR' 'Dashboard copy failed' @{ target = $dashboardTargetFile } Write-Error "Dashboard copy failed." } +Push-LokiEvent 'deploy_dashboard_copied' 'INFO' "Copied $($copiedDashboards.Count) dashboard files" @{ + dashboards = $copiedDashboards -join ',' + target_dir = $DashboardTargetDir +} # ── 4. 
Confirm new copy is deployed; if not, retry once ────────────────────── function Test-DeploySuccess { @@ -182,30 +367,124 @@ function Test-DeploySuccess { if (-not (Test-Path $target)) { Write-Host " Missing: $d"; $ok = $false } elseif ((Get-Item $target).Length -eq 0) { Write-Host " Empty: $d"; $ok = $false } } + $dashSrc = Join-Path $PluginRoot "src\SimSteward.Dashboard" + foreach ($html in Get-ChildItem -Path $dashSrc -Filter "*.html" -File -ErrorAction SilentlyContinue) { + $t = Join-Path $DashboardTargetDir $html.Name + if (-not (Test-Path $t)) { Write-Host " Missing dashboard: $($html.Name)"; $ok = $false } + elseif ((Get-Item $t).Length -eq 0) { Write-Host " Empty dashboard: $($html.Name)"; $ok = $false } + } return $ok } Write-Host "Verifying deploy..." +$verifyRetried = $false if (-not (Test-DeploySuccess)) { Write-Host "Deploy verification failed. Retrying copy once..." + $verifyRetried = $true Copy-DeployDlls if (-not (Test-DeploySuccess)) { + Push-LokiEvent 'deploy_verified' 'ERROR' 'Verification failed after retry' @{ + status = 'failed' + retried = $true + } Write-Error "Deploy failed after retry. Check permissions and disk space." } } Write-Host "Deploy verified." +Push-LokiEvent 'deploy_verified' 'INFO' 'All files verified in target' @{ + status = 'ok' + retried = $verifyRetried +} + +$deployedPluginDll = Join-Path $SimHubPath "SimSteward.Plugin.dll" +$script:SimStewardPluginVersionDeployed = Read-PluginDllProductVersion $deployedPluginDll +Write-Host "" +if (-not [string]::IsNullOrWhiteSpace($script:SimStewardPluginVersionDeployed)) { + Write-Host "=== SimSteward plugin version (deployed): $($script:SimStewardPluginVersionDeployed) ===" -ForegroundColor Cyan + Push-LokiEvent 'deploy_version_resolved' 'INFO' "Plugin version: $($script:SimStewardPluginVersionDeployed)" @{ + plugin_version = $script:SimStewardPluginVersionDeployed + } +} else { + Write-Warning "Could not read ProductVersion from SimSteward.Plugin.dll after deploy." 
+ Push-LokiEvent 'deploy_version_resolved' 'WARN' 'Could not read plugin version from DLL' @{ + plugin_version = 'unknown' + } +} +Write-Host "" + +# ── Sentry release + deploy tracking ──────────────────────────────────────── +$sentryOrg = 'sim-steward' +$sentryProjects = @('simhub-plugin', 'web-dashboards') +$sentryAuthToken = if (-not [string]::IsNullOrWhiteSpace($env:SENTRY_AUTH_TOKEN)) { $env:SENTRY_AUTH_TOKEN } + elseif (-not [string]::IsNullOrWhiteSpace($env:SENTRY_ELEVATED_API_KEY)) { $env:SENTRY_ELEVATED_API_KEY } + else { $null } +$sentryRelease = if (-not [string]::IsNullOrWhiteSpace($script:SimStewardPluginVersionDeployed)) { $script:SimStewardPluginVersionDeployed } else { $null } + +function Push-SentryApi { + param([string]$Path, [hashtable]$Body) + if ([string]::IsNullOrWhiteSpace($sentryAuthToken) -or [string]::IsNullOrWhiteSpace($sentryRelease)) { return } + try { + $url = "https://sentry.io/api/0/organizations/$sentryOrg/$Path" + $json = $Body | ConvertTo-Json -Compress -Depth 5 + $headers = @{ Authorization = "Bearer $sentryAuthToken"; 'Content-Type' = 'application/json' } + Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $json -ErrorAction Stop | Out-Null + } catch { + # Non-fatal: deploy must not fail because Sentry API is down + Write-Warning "Sentry API ($Path): $($_.Exception.Message)" + } +} + +if (-not [string]::IsNullOrWhiteSpace($sentryAuthToken) -and -not [string]::IsNullOrWhiteSpace($sentryRelease)) { + Write-Host "Registering Sentry release: $sentryRelease (2 projects)" + + # Create release across both projects with commits + $fullSha = '' + try { $fullSha = (& git -C $PluginRoot rev-parse HEAD 2>$null) } catch {} + Push-SentryApi "releases/" @{ + version = $sentryRelease + projects = $sentryProjects + refs = @(@{ repository = "simsteward/simhub-plugin"; commit = $fullSha }) + } + + # Deploy: simhub-plugin (C# DLLs) + Push-SentryApi "releases/$sentryRelease/deploys/" @{ + environment = 'local' + name = 
'simhub-plugin' + } + + # Deploy: web-dashboards (all HTML/JS dashboards) + Push-SentryApi "releases/$sentryRelease/deploys/" @{ + environment = 'local' + name = 'web-dashboards' + } + + Write-Host "Sentry release + 2 deploys registered (simhub-plugin, web-dashboards)." + Push-LokiEvent 'deploy_sentry_release' 'INFO' "Sentry release registered: $sentryRelease" @{ + sentry_release = $sentryRelease + sentry_org = $sentryOrg + sentry_projects = ($sentryProjects -join ',') + } +} elseif ([string]::IsNullOrWhiteSpace($sentryAuthToken)) { + Write-Host "Skipping Sentry release tracking (SENTRY_AUTH_TOKEN not set)." +} # ── 5. Re-launch SimHub ───────────────────────────────────────────────────── $skipLaunch = $env:SIMHUB_SKIP_LAUNCH -eq "1" if ($skipLaunch) { Write-Host "Skipping SimHub launch (SIMHUB_SKIP_LAUNCH=1). Plugin WebSocket server listens on port 19847 once SimHub starts." + Push-LokiEvent 'deploy_simhub_launch' 'INFO' 'SimHub launch skipped' @{ status = 'skipped' } } else { Write-Host "Launching SimHub..." Start-Process -FilePath $SimHubExe -WorkingDirectory $SimHubPath Write-Host "Done. Dashboard: http://localhost:8888/Web/sim-steward-dash/index.html (Web Page component) | WebSocket: $(if ($env:SIMSTEWARD_WS_BIND) { $env:SIMSTEWARD_WS_BIND } else { '127.0.0.1' }):$(if ($env:SIMSTEWARD_WS_PORT) { $env:SIMSTEWARD_WS_PORT } else { '19847' })" + Push-LokiEvent 'deploy_simhub_launch' 'INFO' 'SimHub process started' @{ + status = 'launched' + simhub_exe = $SimHubExe + } } # ── 6. Post-deploy tests ─────────────────────────────────────────────────── +$postDeployFailed = $false if (-not $skipTests) { $testsDir = Join-Path $PluginRoot "tests" $testScripts = @() @@ -225,22 +504,57 @@ if (-not $skipTests) { } if ($simHubRunning) { Write-Host "Running post-deploy tests ($($testScripts.Count) script(s))..." 
- $postDeployFailed = $false + Push-LokiEvent 'deploy_post_tests_started' 'INFO' "Running $($testScripts.Count) post-deploy test(s)" @{ + test_type = 'post-deploy' + script_count = $testScripts.Count + scripts = ($testScripts | ForEach-Object { $_.Name }) -join ',' + } foreach ($ts in $testScripts) { Write-Host " Running: $($ts.Name)" + $tsStart = Get-Date & pwsh -NoProfile -File $ts.FullName if ($LASTEXITCODE -ne 0) { Write-Host " FAIL: $($ts.Name) (exit code $LASTEXITCODE). Retrying once..." + Push-LokiEvent 'deploy_post_test_result' 'WARN' "$($ts.Name) failed, retrying" @{ + test_type = 'post-deploy' + script = $ts.Name + status = 'failed_will_retry' + exit_code = $LASTEXITCODE + } Start-Sleep -Milliseconds 3000 & pwsh -NoProfile -File $ts.FullName + $tsDuration = [math]::Round(((Get-Date) - $tsStart).TotalSeconds, 1) if ($LASTEXITCODE -ne 0) { Write-Host " FAIL: $($ts.Name) failed after retry." $postDeployFailed = $true + Push-LokiEvent 'deploy_post_test_result' 'ERROR' "$($ts.Name) failed after retry" @{ + test_type = 'post-deploy' + script = $ts.Name + status = 'failed' + retried = $true + exit_code = $LASTEXITCODE + duration_s = $tsDuration + } } else { Write-Host " PASS: $($ts.Name) (passed on retry)" + Push-LokiEvent 'deploy_post_test_result' 'INFO' "$($ts.Name) passed on retry" @{ + test_type = 'post-deploy' + script = $ts.Name + status = 'ok' + retried = $true + duration_s = $tsDuration + } } } else { + $tsDuration = [math]::Round(((Get-Date) - $tsStart).TotalSeconds, 1) Write-Host " PASS: $($ts.Name)" + Push-LokiEvent 'deploy_post_test_result' 'INFO' "$($ts.Name) passed" @{ + test_type = 'post-deploy' + script = $ts.Name + status = 'ok' + retried = $false + duration_s = $tsDuration + } } } if ($postDeployFailed) { @@ -250,8 +564,57 @@ if (-not $skipTests) { } } else { Write-Host "SimHub not running; skipping post-deploy tests (run tests\*.ps1 manually after starting SimHub)." 
+ Push-LokiEvent 'deploy_post_tests_started' 'WARN' 'Post-deploy tests skipped - SimHub not running' @{ + test_type = 'post-deploy' + status = 'skipped' + reason = 'simhub_not_running' + } + } + } +} + +# SimHub serves /Web/... on its own HTTP port (default 8888). Deploy only copies files; binding is SimHub's job. +if (-not $skipLaunch) { + Start-Sleep -Seconds 6 + if (@(Get-Process -Name "SimHubWPF" -ErrorAction SilentlyContinue).Count -gt 0) { + try { + $probe8888 = Test-NetConnection -ComputerName 127.0.0.1 -Port 8888 -WarningAction SilentlyContinue + if ($probe8888.TcpTestSucceeded) { + Push-LokiEvent 'deploy_port_probe' 'INFO' 'Port 8888 is listening' @{ + port = 8888 + status = 'ok' + } + } else { + Write-Host "" + Write-Warning "Port 8888 is not accepting connections. HTML is deployed under SimHub\Web\sim-steward-dash\ but SimHub's built-in web server is not listening. In SimHub: check Settings for HTTP/web port (default 8888), open Dash Studio or http://127.0.0.1:8888/ after startup, firewall. See docs/TROUBLESHOOTING.md (section 3b)." + Push-LokiEvent 'deploy_port_probe' 'WARN' 'Port 8888 not listening' @{ + port = 8888 + status = 'not_listening' + } + } + } catch { + Push-LokiEvent 'deploy_port_probe' 'WARN' "Port 8888 probe error: $($_.Exception.Message)" @{ + port = 8888 + status = 'error' + } } } } -Write-Host "Deploy complete." 
+# ── EVENT: deploy_completed ────────────────────────────────────────────────── +$totalDuration = [math]::Round(([DateTimeOffset]::UtcNow - $script:deployStartUtc).TotalSeconds, 1) +$finalStatus = if ($postDeployFailed) { 'completed_with_warnings' } else { 'ok' } +$finalLevel = if ($postDeployFailed) { 'WARN' } else { 'INFO' } +$pvOut = if ([string]::IsNullOrWhiteSpace($script:SimStewardPluginVersionDeployed)) { "(unknown)" } else { $script:SimStewardPluginVersionDeployed } + +Push-LokiEvent 'deploy_completed' $finalLevel "Deploy finished in ${totalDuration}s - $finalStatus" @{ + status = $finalStatus + plugin_version = $pvOut + post_deploy_warn = $postDeployFailed + duration_s = $totalDuration + git_branch = $gitBranch + git_sha = $gitSha + simhub_path = $SimHubPath +} + +Write-Host "Deploy complete. Plugin version: $pvOut (${totalDuration}s, $($finalStatus))" diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 039560c..51afda4 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -4,6 +4,22 @@ Diagrams covering C# data structures, WebSocket message contracts, data API sche --- +## Code map (search anchor) + +`SimStewardPlugin` is a **partial class** split across several files (same type, compile-time merge). Use this table to jump from a concern to source. 
+ +| Subsystem | Primary paths | Role | +|-----------|---------------|------| +| Plugin host, lifecycle, WebSocket server, action dispatch | `src/SimSteward.Plugin/SimStewardPlugin.cs` | `IPlugin` / `IDataPlugin` entry, Fleck WS, `DispatchAction`, snapshot broadcast | +| Live + replay incident detection | `src/SimSteward.Plugin/SimStewardPlugin.Incidents.cs` | YAML deltas, incident logging, replay search hooks | +| Replay incident index (data) | `src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndex.cs`, `SimStewardPlugin.ReplayIncidentIndexBuild.cs` | Index build, TR-019-style payloads | +| Replay incident index (dashboard / WS actions) | `src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexDashboard.cs` | WS actions for index UI | +| Data capture suite (SDK / Loki verification) | `src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs` | Capture-suite actions and plumbing | +| Dashboard UI (SimHub HTTP) | `src/SimSteward.Dashboard/index.html`, `replay-incident-index.html`, `data-capture-suite.html` | Browser ES6+ clients; WS to plugin on `SIMSTEWARD_WS_PORT` | +| Structured logging / Loki | `SessionLogging`, sinks under `src/SimSteward.Plugin/` (see [GRAFANA-LOGGING.md](GRAFANA-LOGGING.md)) | JSONL + optional Loki push | + +--- + ## C# Plugin — Core Data Structures Classes that drive the WebSocket state broadcast and structured logging. @@ -122,7 +138,7 @@ classDiagram * **Why?** 1. **Security:** Doing so would require embedding sensitive API tokens (like `SIMSTEWARD_LOKI_TOKEN`) directly into the client-side JavaScript, where anyone could extract them. 2. **CORS:** Browsers will block cross-origin requests from the local SimHub web server (`localhost:8888`) to external domains unless complex and insecure CORS policies are configured on the destination server. -* **The Solution:** The dashboard must route all observability intents (like capturing an incident) through the WebSocket to the C# Plugin. 
The C# Plugin acts as a secure backend, utilizing `PluginLogger` to batch and execute the HTTPS POST requests to `SIMSTEWARD_LOKI_URL` from a trusted server environment. +* **The Solution:** The dashboard must route all observability intents (like capturing an incident) through the WebSocket to the C# Plugin. The C# Plugin acts as a secure backend: **`PluginLogger`** persists structured lines to **`plugin-structured.jsonl`** (and mirrors them over WebSocket). **Loki HTTP push of those lines is not implemented in-process in this repo yet** — use an external shipper tailing the JSONL file, or **`send-deploy-loki-marker.ps1`** for deploy-only markers when `SIMSTEWARD_LOKI_URL` is set. --- @@ -245,7 +261,7 @@ sequenceDiagram T->>P: incident_detected callback P->>P: Enrich with session context (MergeSessionAndRoutingFields) P->>JSONL: Write incident_detected NDJSON line - JSONL-->>Loki: Plugin batches HTTPS POST to SIMSTEWARD_LOKI_URL (async) + JSONL-->>Loki: External shipper or future batch POST (not in plugin today) P->>D: Broadcast updated incidents[] via WebSocket D->>D: Re-render leaderboard + filter chips end diff --git a/docs/DATA-ROUTING-OBSERVABILITY.md b/docs/DATA-ROUTING-OBSERVABILITY.md index bb63266..67939c2 100644 --- a/docs/DATA-ROUTING-OBSERVABILITY.md +++ b/docs/DATA-ROUTING-OBSERVABILITY.md @@ -19,6 +19,8 @@ These are the **intended** paths for SimSteward observability data. Implementati **Grafana** is the visualization layer; it is not the scaling bottleneck. +**Local stack (metrics path):** The repo’s **Docker Compose** stack runs **OpenTelemetry Collector** (OTLP gRPC/HTTP → Prometheus scrape endpoint) and **Prometheus** alongside Loki and Grafana. The SimHub plugin sends **OTLP metrics** when `OTEL_EXPORTER_OTLP_ENDPOINT` or `SIMSTEWARD_OTLP_ENDPOINT` is set (no in-process `/metrics` HTTP server — avoids HttpListener constraints). End-to-end steps and ports: **docs/observability-local.md** § Metrics. + --- ## 2. 
Decision matrix @@ -132,6 +134,7 @@ These stay **events or throttled snapshots** in structured logs — **not** a mi ## References +- **docs/observability-local.md** — Local Grafana/Loki/**Prometheus** compose, OTLP env, smoke queries. - **docs/GRAFANA-LOGGING.md** — Loki schema, volume table, events. - **docs/IRACING-OBSERVABILITY-STRATEGY.md** — iRacing SDK telemetry mapping to Prometheus metrics & Loki events. - **docs/observability-scaling.md** — Many users, central Loki, label rules. diff --git a/docs/GRAFANA-LOGGING.md b/docs/GRAFANA-LOGGING.md index e05b692..6f3f471 100644 --- a/docs/GRAFANA-LOGGING.md +++ b/docs/GRAFANA-LOGGING.md @@ -1,14 +1,14 @@ # Grafana Loki Structured Logging -Structured logging from the SimSteward plugin to Grafana Loki (Grafana Cloud or local Docker). All logs are event-driven; no per-tick logging in production. The pipeline: **Plugin** → `PluginLogger.Structured()` → **plugin-structured.jsonl** (NDJSON on disk for durability) → **HTTPS POST** to **one** Loki push endpoint (`SIMSTEWARD_LOKI_URL`). **No** per-user log shipper, forwarder, or Grafana agent on the user PC — ingestion is **in-process** from the plugin (see **docs/observability-scaling.md**). The in-dashboard log stream is pushed via WebSocket; if sends fail, the plugin writes to **broadcast-errors.log** (see **docs/TROUBLESHOOTING.md** §4b) — that file is not sent to Loki. Explore, custom panels, and AI tooling use the 4-label schema and fixed `event` taxonomy below. For scaling (many users, large grids, label rules, LogQL), see **docs/observability-scaling.md**. Data routing (OTel vs Loki vs Prometheus, ~1k users, which telemetry is metrics vs logs): **docs/DATA-ROUTING-OBSERVABILITY.md**. Local Docker / quick start: **docs/observability-local.md**. +Structured logging for SimSteward (Grafana Loki / Grafana Cloud or local Docker). All logs are event-driven; no per-tick logging in production. 
**Implemented today:** **Plugin** → `PluginLogger.Structured()` → **plugin-structured.jsonl** (NDJSON on disk) and the same entries are **mirrored to the dashboard over WebSocket**. **This repository’s plugin does not yet HTTP POST log lines to Loki** — `SIMSTEWARD_LOKI_URL` is used for routing metadata in JSON (`loki_push_target`), optional **read** paths (e.g. data-capture suite verification), and **scripts**: **`deploy.ps1`** posts a single **`deploy_marker`** via **`send-deploy-loki-marker.ps1`** when the URL is set. To see full plugin logs in Loki today, run an **external shipper** that tails **`plugin-structured.jsonl`** into your stack, or add in-process batch POST later (see **docs/observability-scaling.md**). If WebSocket log sends fail, the plugin writes to **broadcast-errors.log** (see **docs/TROUBLESHOOTING.md** §4b). Explore, custom panels, and AI tooling use the 4-label schema and fixed `event` taxonomy below. Data routing: **docs/DATA-ROUTING-OBSERVABILITY.md**. Local stack: **docs/observability-local.md**. -**Loki: unencumbered stream.** No filtering is applied before Loki. The plugin writes every log entry to **plugin-structured.jsonl** and sends the same stream to Loki; do not filter at the push path. Loki retains the full stream. +**Loki stream (when ingested):** Do not filter at the push path — ship the same lines you write to **plugin-structured.jsonl**. **Filtering is dashboard-only.** The web dashboard receives the full stream via WebSocket and applies level/event visibility filters for display only (checkboxes and `hiddenLevels` / `hiddenEvents`). Toggling "hide DEBUG" or hiding specific event types in the dashboard shows or hides entries that are already in the stream; nothing is dropped at the plugin. ### Local vs prod (same pipeline; env label only) -One pipeline for both: plugin writes all logs to **plugin-structured.jsonl** and **POSTs** them to Loki at **`SIMSTEWARD_LOKI_URL`** (single endpoint per deployment). 
Set `SIMSTEWARD_LOG_ENV=local` for local dev (e.g. Docker stack) or `SIMSTEWARD_LOG_ENV=production` (default); this sets metadata on log lines (`log_env` / routing hints). No source-level omission: Loki and the dashboard stream are full. Volume is controlled by event-driven logging (no per-tick logs) and by the dashboard display filter. +**Env label:** Set `SIMSTEWARD_LOG_ENV=local` for local dev or `SIMSTEWARD_LOG_ENV=production` (default); this flows into JSON as `log_env` / routing hints. The dashboard WebSocket stream is full; Loki reflects whatever you ingest from **plugin-structured.jsonl** (or future in-process POST). Volume is controlled by event-driven logging (no per-tick logs) and by the dashboard display filter. ## Grafana Cloud free tier limits @@ -25,7 +25,7 @@ Volume allowance: free tier ~50 GB/month; our budget is < 1 GB/month. ### Scale: hundreds of drivers / many users -Stream count and labels stay bounded (four labels only; no `session_id` or `driver_id` as labels). Session-end results with 100–200+ drivers use chunked `session_end_datapoints_results` (35 drivers per line); merge chunks in Grafana. Many SimSteward users can send to **one** central Loki (each plugin POSTs to the same endpoint); use an optional bounded `instance_id` label if you need tenancy in queries. Do not log per-driver per-tick in Loki; use metrics (OTel) for high-frequency telemetry. Full stream/volume math, label rules, and query patterns: **docs/observability-scaling.md**. +Stream count and labels stay bounded (four labels only; no `session_id` or `driver_id` as labels). Session-end results with 100–200+ drivers use chunked `session_end_datapoints_results` (35 drivers per line); merge chunks in Grafana. Many SimSteward users can send to **one** central Loki (each instance ships to the same endpoint); use an optional bounded `instance_id` label if you need tenancy in queries. Do not log per-driver per-tick in Loki; use metrics (OTel) for high-frequency telemetry. 
Full stream/volume math, label rules, and query patterns: **docs/observability-scaling.md**. ### Volume budget (per session, ~2 h) @@ -44,13 +44,33 @@ At 30 sessions/month: ~7 MB. Never log on a tick; `DataUpdate()` runs at 60 Hz. Four labels only. Do **not** put high-cardinality values (`session_id`, `car_number`, `action`, `correlation_id`) in labels—they stay in the JSON body. +### Two `app` namespaces + +| `app` | Audience | What it covers | +|-------|----------|----------------| +| `sim-steward` | Product / runtime | C# plugin, dashboard, deploy | +| `claude-dev-logging` | Dev tooling observability | Claude Code hooks, MCP server instrumentation | + +### `app=sim-steward` (product) + | Label | Values | Rationale | |-------|--------|-----------| -| `app` | `sim-steward` | Namespace. | +| `app` | `sim-steward` | Product namespace. | | `env` | `production` or `local` | From `SIMSTEWARD_LOG_ENV`. | -| `component` | `simhub-plugin`, `bridge`, `tracker`, `dashboard` | Subsystem. | +| `component` | `simhub-plugin`, `bridge`, `tracker`, `dashboard`, `deploy` | Subsystem. | | `level` | `INFO`, `WARN`, `ERROR`, `DEBUG` | Severity. `DEBUG` only when `SIMSTEWARD_LOG_DEBUG=1`. | +### `app=claude-dev-logging` (dev tooling) + +| Label | Values | Rationale | +|-------|--------|-----------| +| `app` | `claude-dev-logging` | Dev tooling namespace. | +| `env` | `local` or `dev` | From `SIMSTEWARD_LOG_ENV`. | +| `component` | `hook`, `mcp-contextstream`, `mcp-sentry`, `mcp-ollama` | Subsystem. | +| `level` | `INFO`, `WARN`, `ERROR` | Severity. | + +The generic hook logger (`~/.claude/hooks/loki-log.js`) uses `component=hook`. MCP-specific dedicated hooks use `component=mcp-<service>` (one of `mcp-contextstream`, `mcp-sentry`, `mcp-ollama`). MCP service is also detected in the JSON body `service` field for tool-level queries: `{app="claude-dev-logging", component="hook"} | json | service="contextstream"`. + ## Event taxonomy Every log line has an `event` field. 
Key events: | `bridge_starting` | simhub-plugin | `bind`, `port` | WebSocket bridge starting. | | `bridge_start_failed` | simhub-plugin | `bind`, `port`, `error` | WebSocket server failed to start (WARN). | | `plugin_ready` | simhub-plugin | `ws_port`, `env` | Lifecycle readiness. | +| `deploy_marker` | simhub-plugin | `deploy_status` (`ok` \| `failed`), `post_deploy_warn`, `detail`, `machine`, `simhub_path` | **Not from the in-process plugin** — one line at end of `deploy.ps1` via [scripts/send-deploy-loki-marker.ps1](../scripts/send-deploy-loki-marker.ps1) when `SIMSTEWARD_LOKI_URL` is set. **WARN** level if `post_deploy_warn` (post-deploy `tests/*.ps1` failed after retry). Use Grafana dashboard **Sim Steward — Deploy health** (`simsteward-deploy-health`). | | `host_resource_sample` | simhub-plugin | `process_cpu_pct`, `process_working_set_mb`, `process_private_mb`, `gc_heap_mb`, `process_threads`, `disk_root`, `disk_total_gb`, `disk_free_gb`, `disk_used_pct`, `ws_clients`, `sample_interval_sec` | **~1/min** (default): SimHub process CPU (share of all logical CPUs), memory, managed heap, and usage of the drive that hosts plugin data. Tune interval with `SIMSTEWARD_RESOURCE_SAMPLE_SEC` (15–3600). Use Explore time series on numeric fields to spot spikes; rising `process_working_set_mb` / `gc_heap_mb` over hours suggests growth (not necessarily a leak—correlate with sessions). | | `log_streaming_subscribed` | simhub-plugin | — | Dashboard log streaming attached. | | `irsdk_started` | simhub-plugin | — | iRacing SDK started. 
| diff --git a/docs/IRACING-OBSERVABILITY-STRATEGY.md b/docs/IRACING-OBSERVABILITY-STRATEGY.md index 31320b9..f697483 100644 --- a/docs/IRACING-OBSERVABILITY-STRATEGY.md +++ b/docs/IRACING-OBSERVABILITY-STRATEGY.md @@ -37,7 +37,7 @@ iRacing SDK (60Hz) │ ├──── Prometheus metrics (/metrics endpoint, scraped every 1s) │ - └──── Loki log shipping (via Promtail or Grafana Alloy, structured JSON) + └──── Loki log shipping (structured JSON via HTTP POST or external shipper) iRacing REST API (post-race) │ diff --git a/docs/README.md b/docs/README.md index 2024d60..e2d0306 100644 --- a/docs/README.md +++ b/docs/README.md @@ -19,7 +19,16 @@ Editing files outside the **SimHub rule doc allowlist** does not attach the full | Doc | Use when | |-----|----------| -| [ARCHITECTURE.md](ARCHITECTURE.md) | Class diagrams (PluginSnapshot, LogEntry, WS messages), ER diagram (data API), sequence diagrams (action dispatch, incident pipeline) | +| [ARCHITECTURE.md](ARCHITECTURE.md) | Class diagrams (PluginSnapshot, LogEntry, WS messages), ER diagram (data API), sequence diagrams (action dispatch, incident pipeline); **Code map** table at top links partial `SimStewardPlugin` files | + +--- + +## ContextStream index and mapping + +- **Workspace:** Open this repo as a **single-folder** Cursor workspace rooted at `simhub-plugin` so search and tooling are not mixed with unrelated paths (other clones, AppData, etc.). +- **ContextStream project:** Keep the ContextStream **project path** aligned with that same folder so `ingest_local` / MCP index the intended tree. +- **Corpus hygiene:** [`.cursorignore`](../.cursorignore) trims noise for Cursor; after changing ignore rules or large doc/code moves, run a **forced** ContextStream ingest (`npm run contextstream:ingest:force` — see [.cursor/skills/contextstream/SKILL.md](../.cursor/skills/contextstream/SKILL.md)). 
+- **Structural graph:** ContextStream **code graph** may not expose C# module edges; use keyword/semantic `search` plus the **Code map** in [ARCHITECTURE.md](ARCHITECTURE.md) for navigation. --- diff --git a/docs/TROUBLESHOOTING.md b/docs/TROUBLESHOOTING.md index 3ea1b93..a7454cf 100644 --- a/docs/TROUBLESHOOTING.md +++ b/docs/TROUBLESHOOTING.md @@ -26,6 +26,16 @@ If the dashboard or plugin "does not work", use this checklist to find the cause - **Check:** The page should show "Sim Steward" with connection status, mode, and replay controls. If the component stays blank or returns 404, the DashTemplate wasn’t deployed or SimHub cannot reach port 8888—run `deploy.ps1` again so `SimHub\Web\sim-steward-dash\` exists. - If you have configured `SIMSTEWARD_WS_TOKEN`, append `?token=` (or `?wsToken=`) to the URL in Dash Studio so the dashboard forwards the token when it opens the WebSocket. +### 3b. Browser says connection refused on `localhost:8888` (or `127.0.0.1:8888`) + +**Deploy is not an HTTP server.** `deploy.ps1` copies HTML/CSS/JS into `SimHub\Web\sim-steward-dash\`. **SimHub** must run its **built-in web server** on the configured port (default **8888**) so those files are reachable. + +- **Smoke test:** With SimHub running, open **`http://127.0.0.1:8888/`** — you should see SimHub’s dash list (same check as [SimHub wiki: Dashstudio Web access](https://github.com/SHWotever/SimHub/wiki/Troubleshoot-Dashstudio-Web-access#check-is-simhub-server-is-running)). If that refuses, the problem is SimHub’s HTTP stack or port (not this plugin). +- **Check:** SimHub **Settings** → confirm the **HTTP / web / Dash** port matches **8888** (or use your configured port in every URL). Try another port if something else owns 8888, then restart SimHub. +- **Firewall / VPN:** Allow **SimHubWPF** (incoming **8888**). VPNs can block localhost routing on some setups. 
+- **WebSocket vs HTTP:** The plugin can listen on **19847** while **8888** is still down — green WS in Dash Studio does not prove **8888** is up. +- **404 on `data-capture-suite.html`:** Older `deploy.ps1` only copied `index.html` and `replay-incident-index.html`. Run **`.\deploy.ps1`** again so `data-capture-suite.html` is copied to `SimHub\Web\sim-steward-dash\`. + --- ## 4. Plugin log @@ -93,6 +103,7 @@ If you run a replay and incidents are not captured or signaled: | Red status, "Cannot reach plugin" | Plugin log; port 19847 free; firewall | | Incidents not detected in replay | Section 6: shared memory, connection, focused car, plugin.log | | Blank or 404 in Web Page | URL = `http://localhost:8888/Web/sim-steward-dash/index.html`; run deploy | +| **Connection refused** on `:8888` | §3b: SimHub HTTP not listening — open `http://127.0.0.1:8888/`; Settings port/firewall | | Mode always "Unknown" | iRacing running and shared memory enabled | | No logs in Grafana / Loki | Section 8: SIMSTEWARD_LOKI_URL, local stack, auth, data source | | Log stream empty when clicking buttons | Section 4b: connection, broadcast-errors.log, browser console | @@ -129,10 +140,10 @@ For a step-by-step to get plugin data into **local** Grafana, see **docs/observa If you expect SimSteward logs in Grafana (Cloud or local) but see none: -1. **Plugin output** — The plugin writes **plugin-structured.jsonl** and **POSTs** batched lines to **`SIMSTEWARD_LOKI_URL`** (one Loki push endpoint; no separate agent on the PC). For local stacks, point that URL at `http://localhost:3100` or your **loki-gateway** push URL with `LOKI_PUSH_TOKEN` as documented in **docs/observability-local.md** and **docs/GRAFANA-LOGGING.md**. -2. **Env metadata** — Set `SIMSTEWARD_LOKI_URL` and `SIMSTEWARD_LOG_ENV` before SimHub starts (e.g. via `.env` loaded by your launcher) so log lines include `loki_push_target` / `log_env`; this does not replace ingestion. -3. 
**Local stack** — Start observability from `observability/local/` (`docker compose up -d`) so Loki (3100) and Grafana (3000) run; compose does **not** tail `plugin-structured.jsonl` for you. -4. **Auth (Grafana Cloud)** — Use your stack’s credentials on the **in-process** push to Grafana Cloud; wrong tokens show up as push failures in **plugin.log**, not in a separate agent’s logs. +1. **Plugin output** — The plugin writes **plugin-structured.jsonl** only (plus WebSocket to the dashboard). It does **not** batch-POST those lines to Loki in-process yet. **`deploy.ps1`** can POST a **`deploy_marker`** when **`SIMSTEWARD_LOKI_URL`** is set (see **`send-deploy-loki-marker.ps1`**). For full logs in Loki, use an external shipper to tail **plugin-structured.jsonl**. +2. **Env metadata** — Set `SIMSTEWARD_LOKI_URL` and `SIMSTEWARD_LOG_ENV` before SimHub starts (e.g. `.env` loaded by **`deploy.ps1`** / **`run-simhub-local-observability.ps1`**) so JSON includes `loki_push_target` / `log_env`. +3. **Local stack** — Start observability from `observability/local/` (`npm run obs:up`) so Loki (3100) and Grafana (3000) run; compose does **not** ingest **plugin-structured.jsonl** automatically. +4. **Auth (Grafana Cloud / gateway)** — For **deploy markers**: Grafana Cloud uses **Basic** (`SIMSTEWARD_LOKI_USER` + **`SIMSTEWARD_LOKI_TOKEN`**); local **loki-gateway** uses **Bearer `LOKI_PUSH_TOKEN`**. Push failures print in the deploy script output. 5. **Data source in Grafana** — Point the Loki data source at your Loki URL (e.g. `http://localhost:3100` for local). Explore: `{app="sim-steward"}`. 6. **Debug vs production** — With `SIMSTEWARD_LOG_DEBUG=1`, many more lines (e.g. `tick_stats`, `yaml_update`) are sent. For AI or production dashboards, filter with `| level != "DEBUG"` to avoid noise. @@ -140,7 +151,18 @@ See **docs/GRAFANA-LOGGING.md** for label schema, event taxonomy, and LogQL exam --- -## 9. ContextStream MCP (index / search / 401) +## 9. 
Prometheus / OTLP metrics (local stack) + +For the full pipeline (collector, ports, Grafana datasource URL), see **docs/observability-local.md** § Canonical path and § Metrics / OTLP troubleshooting. + +1. **Nothing in Explore (Prometheus Local)** — Confirm **`npm run obs:up`** is running and **`http://localhost:9090/-/healthy`** returns OK. Smoke: **`npm run obs:poll:prometheus`**. +2. **No `simsteward_*` metrics** — OTLP is disabled unless **`OTEL_EXPORTER_OTLP_ENDPOINT`** or **`SIMSTEWARD_OTLP_ENDPOINT`** is set **before** SimHub starts (SimHub does not load `.env` automatically). Use **`scripts/run-simhub-local-observability.ps1`** or set env in the user/session environment. +3. **`connection refused` to port 4317** — OpenTelemetry Collector is not up or ports are not mapped; restart compose from the repo root. +4. **Wrong protocol** — gRPC defaults for **`http://127.0.0.1:4317`**. For HTTP/protobuf on **4318**, set **`OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf`** and point the endpoint at **4318**. + +--- + +## 10. ContextStream MCP (index / search / 401) **Default workflow:** Keep the repo in sync with ContextStream using the **ContextStream MCP** **`project` tool** — `project(action="index")` or `project(action="ingest_local", path="")` — then log the run with `session(action="capture", event_type="operation", …)` per **docs/CONTEXTSTREAM-UPLOAD-PLAN.md**. Do **not** use ad-hoc HTTP/API scripts for routine sync. The CLI steps below are **troubleshooting only** when MCP or env is misconfigured. diff --git a/docs/observability-local.md b/docs/observability-local.md index 0f364ef..576d11b 100644 --- a/docs/observability-local.md +++ b/docs/observability-local.md @@ -1,6 +1,16 @@ -# Local observability (Grafana / Loki) +# Local observability (Grafana / Loki / Prometheus) -Quick start for plugin logs in local Grafana/Loki and the **loki-gateway** push endpoint. Canonical schema and events: **docs/GRAFANA-LOGGING.md**. 
+Quick start for plugin logs in local Grafana/Loki, **optional OTLP metrics** (OpenTelemetry Collector → Prometheus), and the **loki-gateway** push endpoint. Canonical log schema and events: **docs/GRAFANA-LOGGING.md**. Routing rationale (Loki vs metrics): **docs/DATA-ROUTING-OBSERVABILITY.md**. + +--- + +## Canonical path: metrics (local) + +**Chosen topology:** SimHub plugin → **OTLP** (gRPC default on port **4317**, or HTTP/protobuf on **4318**) → **OpenTelemetry Collector** (`otel-collector` service) → **Prometheus text** on **:8889** → **Prometheus** scrapes the collector → **Grafana** datasource `prometheus_local` (PromQL). + +- **Why not `/metrics` inside the plugin:** SimHub targets .NET Framework 4.8; exposing a pull endpoint without **HttpListener** (admin/port issues) or a separate process is awkward. OTLP to a localhost collector matches **docs/DATA-ROUTING-OBSERVABILITY.md** and keeps a single happy path for local dev. +- **Grafana → Prometheus URL:** use the Docker service name **`http://prometheus:9090`** in provisioning (not `localhost`), because Grafana runs inside the compose network. +- **Plugin → collector URL:** use **`http://127.0.0.1:4317`** (or **4318** with `OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf`) from the Windows host so SimHub resolves IPv4 reliably. --- @@ -16,23 +26,25 @@ Quick start for plugin logs in local Grafana/Loki and the **loki-gateway** push Or copy `observability/local/.env.observability.example` → `.env.observability.local`, set passwords/tokens, then `npm run obs:up:env`. Check: `npm run obs:ps`. -3. **Configure the plugin** — SimHub does not load `.env` by default. Recommended: `.\scripts\run-simhub-local-observability.ps1` (sets `SIMSTEWARD_LOKI_URL=http://localhost:3100`, `SIMSTEWARD_LOG_ENV=local`). Or set those in Windows user env and restart SimHub. See `.env.example` “Local Loki” block. +3. **Configure the plugin** — SimHub does not load `.env` by default. 
Recommended: `.\scripts\run-simhub-local-observability.ps1` (sets `SIMSTEWARD_LOKI_URL=http://localhost:3100`, `SIMSTEWARD_LOG_ENV=local`, and OTLP for metrics — see script). Or set those in Windows user env and restart SimHub. See `.env.example` “Local Loki” and “OTLP / Prometheus (local metrics)” blocks. + +4. **Grafana** — http://localhost:3000 → Explore → Loki → `{app="sim-steward", env="local"}`. Provisioned dashboard **Sim Steward — Deploy health** (`simsteward-deploy-health`) correlates `deploy.ps1` markers (`event=deploy_marker`) with plugin bring-up and errors. Put `SIMSTEWARD_LOKI_URL` (and `LOKI_PUSH_TOKEN` if using loki-gateway) in repo **`.env`** — `deploy.ps1` loads it automatically via `scripts/load-dotenv.ps1` (optional merge: `observability/local/.env.observability.local`). -4. **Grafana** — http://localhost:3000 → Explore → Loki → `{app="sim-steward", env="local"}`. +5. **Metrics (optional)** — With the stack up, set **`OTEL_EXPORTER_OTLP_ENDPOINT=http://127.0.0.1:4317`** (or use `SIMSTEWARD_OTLP_ENDPOINT`) before starting SimHub. After the plugin loads, Explore → **Prometheus Local** → e.g. `simsteward_process_cpu_percent` or `up{job="otel-collector"}`. Smoke: `npm run obs:poll:prometheus` or `.\scripts\poll-prometheus.ps1`. -5. **Generate traffic** — Use SimHub + web dashboard; confirm logs in **Explore** with `{app="sim-steward", env="local"}` (no repo-provisioned Grafana dashboards until you add JSON under `observability/local/grafana/provisioning/dashboards/`). +6. **Generate traffic** — Use SimHub + web dashboard; confirm logs in **Explore** with `{app="sim-steward", env="local"}` (no repo-provisioned Grafana dashboards until you add JSON under `observability/local/grafana/provisioning/dashboards/`). **Storage override:** Set `GRAFANA_STORAGE_PATH` in `.env.observability.local`; compose uses `${GRAFANA_STORAGE_PATH:-S:/sim-steward-grafana-storage}`. 
-**Terminal tail:** `npm run obs:poll` (direct Loki :3100) or `npm run obs:poll:grafana` / `.\scripts\poll-loki.ps1 -ViaGrafana` using **GRAFANA_API_TOKEN** (or admin user/password) in repo `.env` — same path Grafana Explore uses (`loki_local` datasource). +**Terminal tail:** `npm run obs:poll` (direct Loki :3100) or `npm run obs:poll:grafana` / `.\scripts\poll-loki.ps1 -ViaGrafana` using **GRAFANA_API_TOKEN** (or admin user/password) in repo `.env` — same path Grafana Explore uses (`loki_local` datasource). **Prometheus:** `npm run obs:poll:prometheus` / `.\scripts\poll-prometheus.ps1`. --- ## Housekeeping: wipe dashboards’ data (local) -To **clear Loki chunks/WAL** and optional Grafana bind-mount state **without** changing compose, `loki-config.yml`, datasource provisioning, `LOKI_PUSH_TOKEN`, or `SIMSTEWARD_LOKI_*`: +To **clear Loki chunks/WAL**, optional **Prometheus TSDB**, and optional Grafana bind-mount state **without** changing compose, `loki-config.yml`, datasource provisioning, `LOKI_PUSH_TOKEN`, or `SIMSTEWARD_LOKI_*`: -1. From repo root, run **`npm run obs:wipe -- -Force`** (always clears the `loki` subdirectory under `GRAFANA_STORAGE_PATH`). +1. From repo root, run **`npm run obs:wipe -- -Force`** (clears the `loki` and **`prometheus`** subdirectories under `GRAFANA_STORAGE_PATH`). 2. Optional flags: **`-Grafana`** (wipes `grafana.db`; re-run `scripts/grafana-bootstrap.ps1` if you use `GRAFANA_API_TOKEN`), **`-SampleLogs`** (clears `observability/local/sample-logs/*` files), or **`-All`** for both. Equivalent: `.\scripts\obs-wipe-local-data.ps1 -Force` (same switches). @@ -43,13 +55,18 @@ Equivalent: `.\scripts\obs-wipe-local-data.ps1 -Force` (same switches). ## Loki gateway (token-protected push) -The repo stack includes **Grafana**, **Loki**, and **loki-gateway** (nginx). The plugin writes **`plugin-structured.jsonl`** and **POSTs** batches to **`SIMSTEWARD_LOKI_URL`** (no separate log agent). 
For this compose, set that URL to `http://localhost:3100` (Loki) or `http://localhost:3500` (gateway) and use `Authorization: Bearer <token>` when using the gateway — see **docs/GRAFANA-LOGGING.md**. +The repo stack includes **Grafana**, **Loki**, and **loki-gateway** (nginx). The plugin writes **`plugin-structured.jsonl`** on disk (and streams logs over WebSocket to the dashboard); **compose does not tail that file** — rely on **`send-deploy-loki-marker.ps1`** (called from **`deploy.ps1`**) to **POST** deploy markers when **`SIMSTEWARD_LOKI_URL`** is set. For pushes, use `http://localhost:3100` (Loki) or `http://localhost:3500` (gateway) with `Authorization: Bearer <token>` on the gateway — see **docs/GRAFANA-LOGGING.md**. | Service | URL | |---------|-----| | Grafana | http://localhost:3000 | | Loki (query / direct API) | http://localhost:3100 | | loki-gateway (push) | http://localhost:3500 | +| OpenTelemetry Collector (OTLP gRPC) | `http://127.0.0.1:4317` (host → container) | +| OpenTelemetry Collector (OTLP HTTP) | `http://127.0.0.1:4318` | +| Collector Prometheus exporter (host curl / debug) | `http://127.0.0.1:18889/metrics` (mapped from container **8889**; Prometheus scrapes `otel-collector:8889` inside compose) | +| Prometheus (UI / API) | http://localhost:9090 | +| Collector health_check | http://127.0.0.1:13133 | Files under `observability/local/`. Security: `LOKI_PUSH_TOKEN` required for `POST /loki/api/v1/push` on the gateway; gateway denies other routes. @@ -61,6 +78,35 @@ Files under `observability/local/`. Security: `LOKI_PUSH_TOKEN` required for `PO **Troubleshooting:** Token format `Bearer <token>`; ensure `plugin-structured.jsonl` is actually ingested (see **docs/TROUBLESHOOTING.md** §8). 
+### Port collisions (Docker bind errors) + +The stack publishes these **host** ports together; any other process (or second compose project) using the same port will prevent `docker compose up`: + +| Host port | Service | +|-----------|---------| +| 3000 | Grafana | +| 3100 | Loki | +| 3500 | loki-gateway | +| 4317, 4318 | OpenTelemetry Collector (OTLP) | +| 8080 | data-api | +| 9090 | Prometheus | +| 13133 | Collector `health_check` | +| 18889 | Collector Prometheus exporter (host; container listens on 8889) | + +**SimHub** (separate from Docker) commonly uses **8888** (HTTP) and **19847** (Sim Steward WebSocket default). Those can collide with unrelated tools, not usually with this compose file. + +**Audit script:** from repo root run `pwsh -NoProfile -File scripts/check-obs-ports.ps1` to see what is already listening on these ports (and owning process name). + +**Typical conflicts:** **3000** (other Grafana, React dev server), **8080** (many dev backends), **9090** (another Prometheus), **4317/4318** (another OTel collector or agent). **8889:** On some setups **SimHub (`SimHubWPF.exe`)** also listens on **8889** alongside **8888** — that blocks mapping collector **8889** to the host, which is why compose publishes **`18889:8889`** (Prometheus still scrapes `otel-collector:8889` inside Docker). + +### Metrics / OTLP troubleshooting + +- **`up{job="otel-collector"} == 0`** — Prometheus cannot reach the collector on `otel-collector:8889` (compose network). Confirm `otel-collector` is running: `npm run obs:ps`. +- **No `simsteward_*` series** — OTLP is off until **`OTEL_EXPORTER_OTLP_ENDPOINT`** or **`SIMSTEWARD_OTLP_ENDPOINT`** is set **before** SimHub starts. Use **`http://127.0.0.1:4317`** for gRPC; for port **4318** set **`OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf`**. +- **Connection refused on 4317** — Collector not started or ports not published; run `npm run obs:up` from repo root. 
+- **Grafana Prometheus query errors** — Datasource must be **`http://prometheus:9090`** (container DNS), not `localhost:9090`. +- **Loki remains authoritative** for `host_resource_sample` until you rely on Prom-only SLOs; metrics duplicate CPU/working set at OTLP export cadence. + --- ## See also diff --git a/docs/observability-scaling.md b/docs/observability-scaling.md index 2ad41c1..d35987e 100644 --- a/docs/observability-scaling.md +++ b/docs/observability-scaling.md @@ -40,19 +40,18 @@ Per-driver per-tick telemetry is time-series data; use metrics (OTel), not Loki. Local Docker + Loki per developer does **not** scale to ~120 users each running the full stack. -### Current pipeline (local, single-user) +### Current pipeline (this repo, local single-user) -Plugin → `plugin-structured.jsonl` (durability) → batched **HTTPS POST** to `SIMSTEWARD_LOKI_URL` → Loki. **No** separate agent on the user machine; push runs in-process (batching keeps `DataUpdate()` off the hot path). +Plugin → **`plugin-structured.jsonl`** (durability) + WebSocket to dashboard. **No** in-process Loki POST in `SimSteward.Plugin` today. Optional: **`deploy.ps1`** → **`send-deploy-loki-marker.ps1`** POSTs one **`deploy_marker`** line when **`SIMSTEWARD_LOKI_URL`** is set. +### Target / production shape -### Implementation - -Always write **`plugin-structured.jsonl`**. When **`SIMSTEWARD_LOKI_URL`** is set, batch and **POST** NDJSON to that **single** Loki HTTP endpoint (central or Grafana Cloud). No separate forwarder process. +Always write **`plugin-structured.jsonl`**. **Intended:** batch **HTTPS POST** of NDJSON to **`SIMSTEWARD_LOKI_URL`** from inside the plugin (or an approved sidecar) — **one** Loki HTTP endpoint (central or Grafana Cloud). ### Recommendation -- **Default:** Plugin writes **plugin-structured.jsonl** locally and **POSTs** batched NDJSON to **one** Loki HTTP endpoint (`SIMSTEWARD_LOKI_URL`) from inside the plugin — **no** separate log shipper or forwarder on user machines. 
-- **Many users:** Same pattern: many plugin instances → one central Loki; scale ingestion/retention to `users × volume per session`. +- **Today:** Run a **file tail → Loki** agent for **`plugin-structured.jsonl`**, or wait for in-process batch POST. +- **Many users:** Same pattern: many instances → one central Loki; scale ingestion/retention to `users × volume per session`. ### Central Loki / Grafana Cloud diff --git a/docs/superpowers/specs/2026-03-24-contextstream-loki-hook-design.md b/docs/superpowers/specs/2026-03-24-contextstream-loki-hook-design.md new file mode 100644 index 0000000..c777fc1 --- /dev/null +++ b/docs/superpowers/specs/2026-03-24-contextstream-loki-hook-design.md @@ -0,0 +1,400 @@ +# ContextStream MCP PostToolUse Hook → Loki + +**Date:** 2026-03-24 +**Status:** Draft +**Schema version:** 1 +**Scope:** A single PowerShell script registered as a Claude Code `PostToolUse` hook that intercepts every `mcp__contextstream__*` tool call, infers structural metadata, extracts object manifests from responses, and POSTs directly to Loki. No user content, titles, prompts, or descriptions are ever logged. + +--- + +## 1. Goals + +1. Full observability on every ContextStream MCP tool call in Grafana. +2. Log commands verbosely (tool, action, object type, IDs, counts, sizes, statuses). +3. Protect product IP — never log prompt text, content bodies, titles, descriptions, or query strings. +4. Capture object manifests so support can reference the structural state of ContextStream objects without seeing what they contain. +5. Fallback to local JSONL file when Loki is unreachable. + +## 2. Non-Goals + +- Auto-draining the fallback file into Loki on recovery. +- Logging non-ContextStream tool calls. +- Measuring call duration (not available in PostToolUse contract). +- Replacing or altering Claude's view of tool results. + +--- + +## 3. 
Architecture + +``` +Claude Code + │ + ├─ mcp__contextstream__* call executes + │ + └─ PostToolUse hook fires + │ + └─ scripts/contextstream-loki-hook.ps1 + │ + ├─ Parse stdin JSON (tool_name, tool_input, tool_response) + ├─ Filter: exit 0 immediately if not mcp__contextstream__* + ├─ Truncate tool_response to 16 KB before parsing + ├─ Build contextstream_tool_call JSONL line + ├─ Build contextstream_object_manifest JSONL line (if objects found) + ├─ POST to http://localhost:3100/loki/api/v1/push + │ ├─ Success → exit 0 (no stdout) + │ └─ Failure → append to fallback JSONL, exit 1 + └─ Total budget: < 2 seconds +``` + +## 4. Hook Registration + +File: `.claude/settings.json` + +```json +{ + "hooks": { + "PostToolUse": [ + { + "matcher": "mcp__contextstream__.*", + "hooks": [ + { + "type": "command", + "command": "powershell.exe -NoProfile -ExecutionPolicy Bypass -File scripts/contextstream-loki-hook.ps1", + "timeout": 10 + } + ] + } + ] + } +} +``` + +Key decisions: +- `powershell.exe` (Windows PowerShell 5.1) not `pwsh` — faster cold start (~100-200ms vs ~300-800ms). +- `timeout: 10` — hard cap; prevents any edge case from stalling Claude Code. +- `-NoProfile -ExecutionPolicy Bypass` — minimal startup, no user profile overhead. +- Matcher is **regex** (`.*`), not glob (`*`). + +## 5. Hook Input Contract + +Claude Code provides JSON on stdin. Read via `[Console]::In.ReadToEnd()` — do **not** use `$input` which is unreliable for piped stdin in `powershell.exe -File` mode. + +```json +{ + "session_id": "string", + "transcript_path": "/path/to/transcript.jsonl", + "cwd": "/working/directory", + "hook_event_name": "PostToolUse", + "tool_name": "mcp__contextstream__memory", + "tool_use_id": "toolu_01ABC123...", + "tool_input": { + "action": "create_doc", + "doc_type": "spec", + "title": "SENSITIVE — NOT LOGGED", + "content": "SENSITIVE — NOT LOGGED" + }, + "tool_response": { } +} +``` + +## 6. Hook Output Contract + +- **Success:** Exit 0, no stdout. 
Silent pass-through — Claude sees the original tool result unmodified. +- **Failure:** Exit 1, no stdout. Non-blocking — Claude Code logs in verbose mode only; user is not affected. +- **Never** write JSON to stdout — Claude Code would interpret it as hook decisions (block/modify behavior). + +## 7. Loki Labels + +Four labels only (consistent with project schema): + +| Label | Value | +|-------|-------| +| `app` | `claude-dev-logging` | +| `env` | Value of `$env:SIMSTEWARD_LOG_ENV`, default `local` | +| `component` | `mcp-contextstream` | +| `level` | `INFO` (normal) or `ERROR` (hook caught a failure) | + +## 8. Event Types + +### 8.1 `contextstream_tool_call` + +Emitted for every intercepted tool call. + +```json +{ + "schema_version": 1, + "timestamp": "2026-03-24T14:30:00.000Z", + "event": "contextstream_tool_call", + "tool": "mcp__contextstream__memory", + "action": "create_doc", + "tool_use_id": "toolu_01ABC123", + "session_id": "sess_xyz", + "object_type": "doc", + "object_id": "abc-123-def", + "mode": null, + "query_length": 0, + "content_length": 2450, + "response_size_bytes": 312, + "result_count": null, + "success": true, + "error_summary": null +} +``` + +**Success detection:** The `success` field is determined by: +1. If `tool_response` is not valid JSON → `success: false`, `error_summary` = first 200 chars of raw string. +2. If `tool_response` contains an `error` or `message` field at top level → `success: false`, `error_summary` = that field truncated to 200 chars (checked against sensitive field blocklist — strip any content that matches). +3. Otherwise → `success: true`. + +**Action field:** When `tool_input.action` is absent (e.g., `init`, `context`, `search` tools), `action` is derived from the tool name suffix (e.g., `mcp__contextstream__init` → `"init"`). + +### 8.2 `contextstream_object_manifest` + +Emitted when the response contains or references objects (single or collection). 
+ +```json +{ + "timestamp": "2026-03-24T14:30:00.000Z", + "event": "contextstream_object_manifest", + "tool": "mcp__contextstream__memory", + "action": "list_docs", + "tool_use_id": "toolu_01ABC123", + "session_id": "sess_xyz", + "manifest": { + "object_type": "doc", + "total_count": 5, + "truncated": false, + "objects": [ + { + "id": "abc-123", + "doc_type": "spec", + "content_length": 4500, + "created_at": "2026-03-20T10:00:00Z", + "updated_at": "2026-03-22T15:30:00Z" + } + ] + } +} +``` + +## 9. Allowlisted Fields + +The hook uses an **allowlist** approach — only these fields are extracted from `tool_input`. Everything else is ignored. + +### 9.1 Safe `tool_input` fields (logged) + +| Field | Logged as | +|-------|-----------| +| `action` | `action` | +| `session_id` | `session_id` (structural ID, safe) | +| `client_name` | `client_name` | +| `mode` | `mode` | +| `node_type` | inferred → `object_type` | +| `doc_type` | manifest → `doc_type` | +| `diagram_type` | manifest → `diagram_type` | +| `event_type` | manifest → `event_type` | +| `node_id` | `object_id` | +| `doc_id` | `object_id` | +| `diagram_id` | `object_id` | +| `task_id` | `object_id` | +| `todo_id` | `object_id` | +| `plan_id` | `object_id` or manifest field | +| `event_id` | `object_id` | +| `reminder_id` | `object_id` | +| `skill_id` | `object_id` | +| `transcript_id` | `object_id` | +| `task_status` | manifest → `task_status` | +| `todo_status` | manifest → `todo_status` | +| `todo_priority` | manifest → `todo_priority` | +| `priority` | manifest → `priority` | +| `limit` | `result_count` context | +| `is_personal` | manifest field | + +### 9.2 Sensitive `tool_input` fields (never logged) + +- `title`, `content`, `description`, `query`, `user_message`, `assistant_message` +- `new_content`, `blocked_reason`, `reason`, `modified_instruction` +- `steps`, `milestones`, `events`, `entries`, `keywords` +- `impact`, `prevention`, `trigger` + +For these, only the **character count** is logged (e.g., 
`content_length: 2450`). + +**Note:** Any field not on the allowlist is automatically excluded. The blocklist above is documentation-only; the implementation uses allowlist-only extraction. New fields added to ContextStream in the future are excluded by default. + +### 9.3 Safe `tool_response` fields for manifests + +Per object type: + +| Object type | Extracted fields | +|-------------|-----------------| +| `doc` | `id`, `doc_type`, `content_length`*, `created_at`, `updated_at` | +| `node` | `id`, `node_type`, `created_at`, `updated_at`, `superseded_by` | +| `plan` | `id`, `status`, `step_count`*, `created_at`, `updated_at` | +| `task` | `id`, `task_status`, `priority`, `plan_id`, `created_at`, `updated_at` | +| `todo` | `id`, `todo_status`, `todo_priority`, `created_at`, `updated_at` | +| `diagram` | `id`, `diagram_type`, `content_length`*, `created_at`, `updated_at` | +| `skill` | `id`, `status`, `category_count`*, `created_at`, `updated_at` | +| `event` | `id`, `event_type`, `content_length`*, `created_at` | +| `reminder` | `id`, `priority`, `status`, `remind_at`, `created_at` | +| `transcript` | `id`, `client_name`, `started_at` | + +*`content_length` and `step_count` are computed by the hook (string length / array length), not raw content. + +## 10. 
Object Type Inference + +The hook infers `object_type` from the combination of tool name and action: + +``` +mcp__contextstream__memory + action contains "doc" → doc +mcp__contextstream__memory + action contains "node" → node +mcp__contextstream__memory + action contains "task" → task +mcp__contextstream__memory + action contains "todo" → todo +mcp__contextstream__memory + action contains "diagram" → diagram +mcp__contextstream__memory + action contains "event" → event +mcp__contextstream__memory + action contains "transcript"→ transcript +mcp__contextstream__memory + action = "search" → memory_search +mcp__contextstream__memory + action = "decisions" → decision +mcp__contextstream__session + action contains "plan" → plan +mcp__contextstream__session + action contains "lesson" → lesson +mcp__contextstream__session + action = "capture" → session_event +mcp__contextstream__session + action = "remember" → memory +mcp__contextstream__session + action = "recall" → memory +mcp__contextstream__search → search +mcp__contextstream__skill → skill +mcp__contextstream__reminder → reminder +mcp__contextstream__project → project +mcp__contextstream__workspace → workspace +mcp__contextstream__graph → graph +mcp__contextstream__help → help +mcp__contextstream__init → init +mcp__contextstream__context → context +mcp__contextstream__instruct → instruct +mcp__contextstream__ram → ram +mcp__contextstream__media → media +mcp__contextstream__integration → integration +(any other mcp__contextstream__X) → X (fallback: use tool suffix) +``` + +**Note:** The `context` and `search` tools frequently return responses exceeding 16 KB. For these tools, only the `tool_call` event is emitted; manifest extraction is skipped by design due to truncation. + +## 11. 
Loki Push Format + +HTTP POST to `http://localhost:3100/loki/api/v1/push` + +```json +{ + "streams": [ + { + "stream": { + "app": "claude-dev-logging", + "env": "local", + "component": "mcp-contextstream", + "level": "INFO" + }, + "values": [ + ["1711288200000000000", "{\"event\":\"contextstream_tool_call\",...}"], + ["1711288200000000001", "{\"event\":\"contextstream_object_manifest\",...}"] + ] + } + ] +} +``` + +- Timestamp is Unix nanoseconds (required by Loki push API). +- Both lines share the same stream labels. +- The manifest line gets timestamp +1 ns to ensure ordering. + +## 12. Response Parsing Safety + +1. **Input truncation:** Truncate `tool_response` string to 16 KB before attempting JSON parse. This prevents memory issues on large list responses. +2. **Defensive parsing:** If `tool_response` is not valid JSON (plain string, error text), skip manifest extraction. Still emit the `tool_call` line with `success: false`. +3. **Size cap on manifest output:** If the serialized manifest exceeds 8 KB, truncate the `objects` array and set `truncated: true` with `original_count`. +4. **`result_count` extraction heuristic:** If the response contains a `results` array, count its length. If it contains `files`, count those. If it contains a top-level `total` or `count` field, use that. Otherwise `null`. + +### 12.1 PowerShell 5.1 JSON constraints + +- **`ConvertFrom-Json`:** Does not support `-Depth` parameter. After parsing, validate the result is an object (`-is [PSCustomObject]` or `[Hashtable]`), not a string. Deeply nested responses may silently truncate. +- **`ConvertTo-Json`:** Defaults to depth 2. The manifest has 3+ levels of nesting. **Every `ConvertTo-Json` call MUST use `-Depth 10`** to prevent `objects` array entries serializing as `System.Collections.Hashtable`. +- **Nanosecond timestamps:** Must be emitted as **strings** in the JSON payload (as shown in Section 11). 
Use `.ToString()` — do not rely on `ConvertTo-Json` which may emit scientific notation for 19-digit integers. +- **Content-Type:** `Invoke-RestMethod` POST must include `-ContentType 'application/json'`. + +## 13. Fallback File + +**Path:** `observability/local/logs/contextstream-loki-fallback.jsonl` + +Behavior: +- On Loki POST failure (connection refused, timeout, non-2xx), append the same JSONL lines to this file. +- Create the directory on first write if it doesn't exist. +- Use `[System.IO.File]::AppendAllText` — atomic at the OS level for appends under typical NTFS conditions. +- No auto-drain on recovery. Manual concern for now. + +## 14. Concurrency + +Multiple Claude Code tool calls can fire hooks in parallel. Mitigations: +- **Loki POST:** Stateless HTTP — parallel POSTs are safe. +- **Fallback file:** Use a named mutex (`'Global\ContextStreamLokiFallback'` — single-quoted to prevent backslash interpolation) around file writes to prevent interleaved lines. + +## 15. File Layout + +``` +scripts/ + contextstream-loki-hook.ps1 # The hook script +observability/ + local/ + logs/ + contextstream-loki-fallback.jsonl # Fallback (created on first failure) +.claude/ + settings.json # Hook registration (updated) +``` + +## 16. Testing + +### Hook fires verification (do this first) +1. Add a temporary canary line at the top of `contextstream-loki-hook.ps1`: `[System.IO.File]::AppendAllText("$PSScriptRoot\canary.txt", "fired $(Get-Date)`n")` +2. Make any ContextStream MCP call. +3. Verify `scripts/canary.txt` was created. If not, the hook registration is wrong. +4. Remove the canary line after confirming. + +### Manual verification +1. Start the local Loki stack (`docker compose up` in `observability/local/`). +2. Open Grafana at `localhost:3000`. +3. Run the ContextStream test suite from `tests/contextstream-mcp-test-prompt.md`. +4. Query Grafana: `{app="claude-dev-logging", component="mcp-contextstream"}`. +5. 
Verify: tool_call lines for every MCP call, manifest lines for object-returning calls. + +### Fallback verification +1. Stop Loki. +2. Make a ContextStream MCP call. +3. Verify `contextstream-loki-fallback.jsonl` contains the lines. + +### Sensitive data audit +1. Run the test suite. +2. Grep all logged lines for known test content strings (e.g., "[CS-TEST]", "Round-trip test node"). +3. Verify: zero matches. Only IDs, types, counts, and timestamps should appear. + +## 17. Grafana Query Examples + +**All ContextStream calls in the last hour:** +```logql +{app="claude-dev-logging", component="mcp-contextstream"} | json | event = "contextstream_tool_call" +``` + +**Failed calls:** +```logql +{app="claude-dev-logging", component="mcp-contextstream", level="ERROR"} | json | success = false +``` + +**Object manifest for docs:** +```logql +{app="claude-dev-logging", component="mcp-contextstream"} | json | event = "contextstream_object_manifest" | manifest_object_type = "doc" +``` + +**All actions on a specific object ID:** +```logql +{app="claude-dev-logging", component="mcp-contextstream"} | json | object_id = "abc-123-def" +``` + +**Call volume by action (last 24h):** +```logql +sum by (action) (count_over_time({app="claude-dev-logging", component="mcp-contextstream"} | json | event = "contextstream_tool_call" [24h])) +``` diff --git a/observability/local/.env.observability.example b/observability/local/.env.observability.example new file mode 100644 index 0000000..e30ff1c --- /dev/null +++ b/observability/local/.env.observability.example @@ -0,0 +1,19 @@ +# Local observability stack environment variables example +# Copy this file to .env.observability.local and fill in values. + +# Path where Loki, Grafana, and cursor-usage persistent data will be stored on the host. +# Example: S:/sim-steward-grafana-storage or C:/grafana-storage +GRAFANA_STORAGE_PATH= + +# Token required by loki-gateway for pushing logs. Generate a strong token. 
+# Example (PowerShell): [Convert]::ToBase64String((1..48 | ForEach-Object { Get-Random -Maximum 256 })) +LOKI_PUSH_TOKEN= + +# Path to the SimHub plugin data directory on the host. Alloy tails plugin-structured.jsonl from here. +# Example Windows: C:/Users/<username>/AppData/Local/SimHubWpf/PluginsData/SimSteward +SIMSTEWARD_DATA_PATH= + +# Grafana login (compose substitutes into GF_SECURITY_ADMIN_*). Used only when Grafana has no DB yet. +# If you forgot the password, stop the stack, wipe the Grafana volume (npm run obs:wipe -- -Force -Grafana), then up again. +GRAFANA_ADMIN_USER=admin +GRAFANA_ADMIN_PASSWORD=change-me diff --git a/observability/local/config.alloy b/observability/local/config.alloy new file mode 100644 index 0000000..277f2a5 --- /dev/null +++ b/observability/local/config.alloy @@ -0,0 +1,43 @@ +// Grafana Alloy — tail plugin-structured.jsonl → Loki +// Docs: https://grafana.com/docs/alloy/latest/ + +local.file_match "simsteward_structured" { + path_targets = [{"__path__" = "/var/log/simsteward/plugin-structured.jsonl"}] + sync_period = "5s" +} + +loki.source.file "simsteward_structured" { + targets = local.file_match.simsteward_structured.targets + forward_to = [loki.process.simsteward.receiver] + + tail_from_end = true +} + +loki.process "simsteward" { + forward_to = [loki.write.local.receiver] + + // Extract low-cardinality labels from JSON; everything else stays in the log line. 
+ stage.json { + expressions = { + level = "level", + component = "component", + event = "event", + domain = "domain", + } + } + + stage.labels { + values = { + level = "", + component = "", + event = "", + domain = "", + } + } +} + +loki.write "local" { + endpoint { + url = "http://loki:3100/loki/api/v1/push" + } +} diff --git a/observability/local/docker-compose.yml b/observability/local/docker-compose.yml index df097fa..ebde21e 100644 --- a/observability/local/docker-compose.yml +++ b/observability/local/docker-compose.yml @@ -32,11 +32,45 @@ services: timeout: 5s retries: 10 + otel-collector: + image: otel/opentelemetry-collector-contrib:0.115.1 + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml:ro + ports: + - "4317:4317" + - "4318:4318" + # Host 18889 avoids conflict with other tools binding Windows :8889; Prometheus still scrapes otel-collector:8889 on the Docker network. + - "18889:8889" + - "13133:13133" + + prometheus: + image: prom/prometheus:v2.55.1 + depends_on: + - otel-collector + command: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--storage.tsdb.retention.time=15d" + - "--web.enable-lifecycle" + ports: + - "9090:9090" + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro + - ${GRAFANA_STORAGE_PATH:-S:/sim-steward-grafana-storage}/prometheus:/prometheus + healthcheck: + test: ["CMD", "wget", "-q", "-O", "-", "http://127.0.0.1:9090/-/healthy"] + interval: 10s + timeout: 5s + retries: 10 + grafana: image: grafana/grafana:11.2.0 depends_on: loki: condition: service_healthy + prometheus: + condition: service_healthy ports: - "3000:3000" environment: @@ -47,6 +81,17 @@ services: - ${GRAFANA_STORAGE_PATH:-S:/sim-steward-grafana-storage}/grafana:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning:ro + alloy: + image: grafana/alloy:v1.5.1 + depends_on: + loki: + condition: service_healthy + volumes: + 
- ./config.alloy:/etc/alloy/config.alloy:ro + - ${SIMSTEWARD_DATA_PATH}:/var/log/simsteward:ro + - ${GRAFANA_STORAGE_PATH:-S:/sim-steward-grafana-storage}/alloy:/tmp/positions + command: ["run", "/etc/alloy/config.alloy", "--storage.path=/tmp/positions"] + data-api: build: ./data-api ports: diff --git a/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json b/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json new file mode 100644 index 0000000..7be1cdb --- /dev/null +++ b/observability/local/grafana/provisioning/dashboards/simsteward-deploy-health.json @@ -0,0 +1,217 @@ +{ + "annotations": { "list": [] }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "gridPos": { "h": 3, "w": 24, "x": 0, "y": 0 }, + "id": 1, + "options": { + "code": { "language": "markdown", "showLineNumbers": false, "showMiniMap": false }, + "content": "**Deploy health** — Correlates `deploy.ps1` with plugin bring-up in Loki.\n\n- **Deploy markers** — Lines pushed at end of `deploy.ps1` when `SIMSTEWARD_LOKI_URL` is set (`event=deploy_marker`). 
`post_deploy_warn=true` means post-deploy `tests/*.ps1` failed after retry.\n- **Plugin / bridge** — `plugin_ready` and `bridge_start_failed` show whether SimHub loaded the plugin and WebSocket started.\n- **Errors** — Structured ERROR lines; spike after a bad deploy often means SimHub/plugin mismatch or WS failure.\n\nOpen repo `deploy.ps1` console output for copy failures; this dashboard is **telemetry**, not a full deploy log.", + "mode": "markdown" + }, + "title": "About", + "type": "text" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 3 }, + "id": 2, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{app=\"sim-steward\", env=\"${env}\"} | json | event=\"deploy_marker\"", + "queryType": "range", + "refId": "A" + } + ], + "title": "Deploy markers (deploy.ps1 → Loki)", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 13 }, + "id": 3, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{app=\"sim-steward\", env=\"${env}\"} | json | event =~ \"plugin_ready|plugin_started|bridge_starting\"", + "queryType": "range", + "refId": "A" + } + ], + "title": "Plugin / bridge lifecycle", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 13 }, + "id": 4, + "options": { + "dedupStrategy": 
"none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{app=\"sim-steward\", env=\"${env}\"} | json | event=\"bridge_start_failed\"", + "queryType": "range", + "refId": "A" + } + ], + "title": "WebSocket bridge failures", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "drawStyle": "bars", + "fillOpacity": 40, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 22 }, + "id": 5, + "options": { + "legend": { "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "sum(count_over_time({app=\"sim-steward\", env=\"${env}\", level=\"ERROR\"} [5m]))", + "legendFormat": "ERROR lines / 5m", + "queryType": "range", + "refId": "A" + } + ], + "title": "ERROR log volume (5m buckets)", + "type": "timeseries" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 12, "w": 24, "x": 0, "y": 30 }, + "id": 6, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{app=\"sim-steward\", env=\"${env}\", level=\"ERROR\"}", + "queryType": "range", + "refId": "A" + 
} + ], + "title": "Recent ERROR lines (full)", + "type": "logs" + }, + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "gridPos": { "h": 10, "w": 24, "x": 0, "y": 42 }, + "id": 7, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": true, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { "type": "loki", "uid": "loki_local" }, + "editorMode": "code", + "expr": "{app=\"sim-steward\", env=\"${env}\"} | json | event=\"action_result\" | success != true", + "queryType": "range", + "refId": "A" + } + ], + "title": "Failed actions (action_result success=false)", + "type": "logs" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": ["sim-steward", "deploy"], + "templating": { + "list": [ + { + "current": { "selected": true, "text": "local", "value": "local" }, + "hide": 0, + "includeAll": false, + "label": "env", + "name": "env", + "options": [ + { "selected": true, "text": "local", "value": "local" }, + { "selected": false, "text": "production", "value": "production" } + ], + "query": "local,production", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { "from": "now-24h", "to": "now" }, + "timepicker": {}, + "timezone": "browser", + "title": "Sim Steward — Deploy health", + "uid": "simsteward-deploy-health", + "version": 1, + "weekStart": "" +} diff --git a/observability/local/grafana/provisioning/datasources/prometheus.yml b/observability/local/grafana/provisioning/datasources/prometheus.yml new file mode 100644 index 0000000..5197b5a --- /dev/null +++ b/observability/local/grafana/provisioning/datasources/prometheus.yml @@ -0,0 +1,13 @@ +apiVersion: 1 + +datasources: + - name: Prometheus Local + type: prometheus + uid: prometheus_local + access: proxy + url: http://prometheus:9090 + isDefault: false + editable: false + jsonData: + httpMethod: POST + timeInterval: 15s diff 
--git a/observability/local/otel-collector-config.yaml b/observability/local/otel-collector-config.yaml new file mode 100644 index 0000000..9815561 --- /dev/null +++ b/observability/local/otel-collector-config.yaml @@ -0,0 +1,36 @@ +# OpenTelemetry Collector (contrib): OTLP in → Prometheus text format out for local Prometheus scrape. +# See docs/observability-local.md § Metrics (canonical local path). + +extensions: + health_check: + endpoint: 0.0.0.0:13133 + +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +processors: + batch: {} + memory_limiter: + check_interval: 1s + limit_mib: 512 + spike_limit_mib: 128 + +exporters: + prometheus: + endpoint: 0.0.0.0:8889 + namespace: "" + const_labels: + cluster: local + +service: + extensions: [health_check] + pipelines: + metrics: + receivers: [otlp] + processors: [memory_limiter, batch] + exporters: [prometheus] diff --git a/observability/local/prometheus.yml b/observability/local/prometheus.yml new file mode 100644 index 0000000..29ba753 --- /dev/null +++ b/observability/local/prometheus.yml @@ -0,0 +1,19 @@ +# Local Prometheus: scrape collector (OTLP-derived metrics) and self-monitor. 
+# Grafana datasource uses Docker DNS: http://prometheus:9090 + +global: + scrape_interval: 15s + scrape_timeout: 10s + evaluation_interval: 15s + external_labels: + cluster: local + +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ["localhost:9090"] + + - job_name: otel-collector + metrics_path: /metrics + static_configs: + - targets: ["otel-collector:8889"] diff --git a/package-lock.json b/package-lock.json index f11e127..71ce4a4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -6,9 +6,498 @@ "": { "name": "sim-steward-plugin", "devDependencies": { - "dotenv-cli": "^7.4.4" + "@secretlint/secretlint-rule-preset-recommend": "^11.4.0", + "dotenv-cli": "^7.4.4", + "secretlint": "^11.4.0" } }, + "node_modules/@azu/format-text": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@azu/format-text/-/format-text-1.0.2.tgz", + "integrity": "sha512-Swi4N7Edy1Eqq82GxgEECXSSLyn6GOb5htRFPzBDdUkECGXtlf12ynO5oJSpWKPwCaUssOu7NfhDcCWpIC6Ywg==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@azu/style-format": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@azu/style-format/-/style-format-1.0.1.tgz", + "integrity": "sha512-AHcTojlNBdD/3/KxIKlg8sxIWHfOtQszLvOpagLTO+bjC3u7SAszu1lf//u7JJC50aUSH+BVWDD/KvaA6Gfn5g==", + "dev": true, + "license": "WTFPL", + "dependencies": { + "@azu/format-text": "^1.0.1" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@secretlint/config-creator": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/config-creator/-/config-creator-11.4.0.tgz", + "integrity": "sha512-6/WibDQky7tyHNmE5fOe1rLYtg9h/oxkJqfTWZyzes8XYUgxF9xGPA/1TvlI2p6XJS2R1i9M00X+Y2gK3zGAQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/types": "11.4.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/config-loader": { + "version": "11.4.0", + "resolved": 
"https://registry.npmjs.org/@secretlint/config-loader/-/config-loader-11.4.0.tgz", + "integrity": "sha512-DEPtgz9VKDIuf0KsbrxxMzkHn1nlVAEpIIOykkqHftODZFm0EOKK+2h1PL/8Uo7vRtT4YRqsgDb0BBYyfTjNug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/profiler": "11.4.0", + "@secretlint/resolver": "11.4.0", + "@secretlint/types": "11.4.0", + "ajv": "^8.18.0", + "debug": "^4.4.3", + "rc-config-loader": "^4.1.4" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/core": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/core/-/core-11.4.0.tgz", + "integrity": "sha512-bxpDYzWNcPT0xh+rUYI3AF/Trz5VMA9mUOoRxxKrv5f8zGzNJkr667LqGdQmpjqQ6Ql+Ke8+64J1HbKXDvE/ag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/profiler": "11.4.0", + "@secretlint/types": "11.4.0", + "debug": "^4.4.3", + "structured-source": "^4.0.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/formatter": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/formatter/-/formatter-11.4.0.tgz", + "integrity": "sha512-4kzbges1+sJmTB7QGXWkuAprtDpFegiokBitaxd8XMHPFKhmFfGd3XVnquiprFZBUgXMSu8BquTJAOPC+z51Ew==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/resolver": "11.4.0", + "@secretlint/types": "11.4.0", + "@textlint/linter-formatter": "^15.5.2", + "@textlint/module-interop": "^15.5.2", + "@textlint/types": "^15.5.2", + "chalk": "^5.6.2", + "debug": "^4.4.3", + "pluralize": "^8.0.0", + "strip-ansi": "^7.2.0", + "table": "^6.9.0", + "terminal-link": "^4.0.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/node": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/node/-/node-11.4.0.tgz", + "integrity": "sha512-dWfVJs7/tCujsyR5D9xjVJsyrjondfLKs3xKpMfv7nIOn7SADi+xs6e0LjDDtAyCRcVe84GmFVU3I54BqJ43XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/config-loader": 
"11.4.0", + "@secretlint/core": "11.4.0", + "@secretlint/formatter": "11.4.0", + "@secretlint/profiler": "11.4.0", + "@secretlint/source-creator": "11.4.0", + "@secretlint/types": "11.4.0", + "debug": "^4.4.3", + "p-map": "^7.0.4" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/profiler": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/profiler/-/profiler-11.4.0.tgz", + "integrity": "sha512-wemv+sxhNG8/4g+vXBcPNpJO9e43SQJaiM1lvDVWJVdkPCVOB3OEKUdlpUyuLc2i1G4UuUS3zzwyv/JkU5CQVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@secretlint/resolver": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/resolver/-/resolver-11.4.0.tgz", + "integrity": "sha512-PeK3F6U+SOvYcwXh2b97RBghLfOO5euGxaA7UKQe2nWcef9VkcLTX6ni+dRYdPJExOxU6WMWCdfY5yVbhd6aJw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@secretlint/secretlint-rule-preset-recommend": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/secretlint-rule-preset-recommend/-/secretlint-rule-preset-recommend-11.4.0.tgz", + "integrity": "sha512-Jg6MsrDHYDpeEt9adqO6hLqqJLsGT+D/d87wrQCC+D24e7w07V/zpR07K63YogRtPyPKX0tneKnyR884ji7DSA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/source-creator": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/source-creator/-/source-creator-11.4.0.tgz", + "integrity": "sha512-j4I1hBBYFbnBVEcj5EEbi4iXT/uK+gg6MBycBo2t2+HPzQ7pg2MDD5aWGHyd5qelrRcCV5Gw4VzMXz/NMKD2Wg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/types": "11.4.0", + "istextorbinary": "^9.5.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@secretlint/types": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/@secretlint/types/-/types-11.4.0.tgz", + "integrity": 
"sha512-aqEnJHFtzRJX0QumzPSQW35yi6vwDgexPaAC5WoZFidatSQF1hH6lQIGY0FQrng+vP0zTTA0/45atowRlvrTNA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@sindresorhus/merge-streams": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", + "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@textlint/ast-node-types": { + "version": "15.5.2", + "resolved": "https://registry.npmjs.org/@textlint/ast-node-types/-/ast-node-types-15.5.2.tgz", + "integrity": "sha512-fCaOxoup5LIyBEo7R1oYWE7V4bSX0KQeHh66twon9e9usaLE3ijgF8QjYsR6joCssdeCHVd0wHm7ppsEyTr6vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@textlint/linter-formatter": { + "version": "15.5.2", + "resolved": "https://registry.npmjs.org/@textlint/linter-formatter/-/linter-formatter-15.5.2.tgz", + "integrity": "sha512-jAw7jWM8+wU9cG6Uu31jGyD1B+PAVePCvnPKC/oov+2iBPKk3ao30zc/Itmi7FvXo4oPaL9PmzPPQhyniPVgVg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@azu/format-text": "^1.0.2", + "@azu/style-format": "^1.0.1", + "@textlint/module-interop": "15.5.2", + "@textlint/resolver": "15.5.2", + "@textlint/types": "15.5.2", + "chalk": "^4.1.2", + "debug": "^4.4.3", + "js-yaml": "^4.1.1", + "lodash": "^4.17.23", + "pluralize": "^2.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "table": "^6.9.0", + "text-table": "^0.2.0" + } + }, + "node_modules/@textlint/linter-formatter/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=8" + } + }, + "node_modules/@textlint/linter-formatter/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@textlint/linter-formatter/node_modules/pluralize": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-2.0.0.tgz", + "integrity": "sha512-TqNZzQCD4S42De9IfnnBvILN7HAW7riLqsCyp8lgjXeysyPlX5HhqKAcJHHHb9XskE4/a+7VGC9zzx8Ls0jOAw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@textlint/linter-formatter/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@textlint/module-interop": { + "version": "15.5.2", + "resolved": "https://registry.npmjs.org/@textlint/module-interop/-/module-interop-15.5.2.tgz", + "integrity": "sha512-mg6rMQ3+YjwiXCYoQXbyVfDucpTa1q5mhspd/9qHBxUq4uY6W8GU42rmT3GW0V1yOfQ9z/iRrgPtkp71s8JzXg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@textlint/resolver": { + "version": "15.5.2", + "resolved": "https://registry.npmjs.org/@textlint/resolver/-/resolver-15.5.2.tgz", + "integrity": "sha512-YEITdjRiJaQrGLUWxWXl4TEg+d2C7+TNNjbGPHPH7V7CCnXm+S9GTjGAL7Q2WSGJyFEKt88Jvx6XdJffRv4HEA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@textlint/types": { + "version": "15.5.2", + "resolved": 
"https://registry.npmjs.org/@textlint/types/-/types-15.5.2.tgz", + "integrity": "sha512-sJOrlVLLXp4/EZtiWKWq9y2fWyZlI8GP+24rnU5avtPWBIMm/1w97yzKrAqYF8czx2MqR391z5akhnfhj2f/AQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@textlint/ast-node-types": "15.5.2" + } + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz", + "integrity": "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==", + "dev": true, + "license": "MIT" + }, + "node_modules/ajv": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz", + "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.3.0.tgz", + "integrity": "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==", + "dev": true, + "license": "MIT", + "dependencies": { + "environment": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": 
{ + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/binaryextensions": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/binaryextensions/-/binaryextensions-6.11.0.tgz", + "integrity": "sha512-sXnYK/Ij80TO3lcqZVV2YgfKN5QjUWIRk/XSm2J/4bd/lPko3lvk0O4ZppH6m+6hB2/GTu+ptNwVFe1xh+QLQw==", + "dev": true, + "license": "Artistic-2.0", + "dependencies": { + "editions": "^6.21.0" + }, + "engines": { + "node": ">=4" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/boundary": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/boundary/-/boundary-2.0.0.tgz", + "integrity": "sha512-rJKn5ooC9u8q13IMCrW0RSp31pxBCHE3y9V/tp3TdWSLf8Em3p6Di4NBpfzbJge9YjjFEsD0RtFEjtvHL5VyEA==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": 
"sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -24,6 +513,24 @@ "node": ">= 8" } }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, "node_modules/dotenv": { "version": "16.6.1", "resolved": 
"https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", @@ -63,25 +570,405 @@ "node": ">=12" } }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "node_modules/editions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/editions/-/editions-6.22.0.tgz", + "integrity": "sha512-UgGlf8IW75je7HZjNDpJdCv4cGJWIi6yumFdZ0R7A8/CIhQiWUjyGLCxdHpd8bmyD1gnkfUNK0oeOXqUS2cpfQ==", "dev": true, - "license": "ISC" + "license": "Artistic-2.0", + "dependencies": { + "version-range": "^4.15.0" + }, + "engines": { + "ecmascript": ">= es5", + "node": ">=4" + }, + "funding": { + "url": "https://bevry.me/fund" + } }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/environment": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz", + "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==", "dev": true, "license": "MIT", + "engines": { + "node": ">=18" + }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/path-key": { - "version": "3.1.1", + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/globby": { + "version": "14.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-14.1.0.tgz", + "integrity": "sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sindresorhus/merge-streams": "^2.1.0", + "fast-glob": "^3.3.3", + "ignore": "^7.0.3", + "path-type": "^6.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/hosted-git-info": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-7.0.2.tgz", + "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^10.0.1" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/index-to-position": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/index-to-position/-/index-to-position-1.2.0.tgz", + "integrity": 
"sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istextorbinary": { + "version": "9.5.0", + "resolved": 
"https://registry.npmjs.org/istextorbinary/-/istextorbinary-9.5.0.tgz", + "integrity": "sha512-5mbUj3SiZXCuRf9fT3ibzbSSEWiy63gFfksmGfdOzujPjW3k+z8WvIBxcJHBoQNlaZaiyB25deviif2+osLmLw==", + "dev": true, + "license": "Artistic-2.0", + "dependencies": { + "binaryextensions": "^6.11.0", + "editions": "^6.21.0", + "textextensions": "^6.11.0" + }, + "engines": { + "node": ">=4" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lodash": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", + "integrity": "sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-package-data": { + "version": "6.0.2", + "resolved": 
"https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", + "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^7.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/p-map": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz", + "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-json": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", + "integrity": "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "index-to-position": "^1.1.0", + "type-fest": "^4.39.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-key": { + "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, @@ -90,6 +977,197 @@ "node": ">=8" } }, + "node_modules/path-type": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-6.0.0.tgz", + "integrity": "sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/rc-config-loader": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/rc-config-loader/-/rc-config-loader-4.1.4.tgz", + "integrity": "sha512-3GiwEzklkbXTDp52UR5nT8iXgYAx1V9ZG/kDZT7p60u2GCv2XTwQq4NzinMoMpNtXhmt3WkhYXcj6HH8HdwCEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.4.3", + "js-yaml": "^4.1.1", + "json5": "^2.2.3", + "require-from-string": "^2.0.2" + } + }, + "node_modules/read-pkg": { + "version": 
"9.0.1", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-9.0.1.tgz", + "integrity": "sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.3", + "normalize-package-data": "^6.0.0", + "parse-json": "^8.0.0", + "type-fest": "^4.6.0", + "unicorn-magic": "^0.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg/node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + 
}, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/secretlint": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/secretlint/-/secretlint-11.4.0.tgz", + "integrity": "sha512-UYLrriP+QjLbyTxVUihMd8xva/2sPMgqIzJw2+4bRxXPZeWUXJ6b1BcyiTso6BnHcwYvFoytMMaoB/h+Nsvluw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@secretlint/config-creator": "11.4.0", + "@secretlint/formatter": "11.4.0", + "@secretlint/node": "11.4.0", + "@secretlint/profiler": "11.4.0", + "@secretlint/resolver": "11.4.0", + "debug": "^4.4.3", + "globby": "^14.1.0", + "read-pkg": "^9.0.1" + }, + "bin": { + "secretlint": "bin/secretlint.js" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -113,6 +1191,310 @@ "node": ">=8" } }, + "node_modules/slash": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", + "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": 
"sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", + "dev": true, + "license": "CC-BY-3.0" + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.23", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.23.tgz", + "integrity": "sha512-CWLcCCH7VLu13TgOH+r8p1O/Znwhqv/dbb6lqWy67G+pT1kHmeD/+V36AVb/vq8QMIQwVShJ6Ssl5FPh0fuSdw==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/structured-source": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/structured-source/-/structured-source-4.0.0.tgz", + "integrity": "sha512-qGzRFNJDjFieQkl/sVOI2dUjHKRyL9dAJi2gCPGJLbJHBIkyOHxjuocpIEfbLioX+qSJpvbYdT49/YCdMznKxA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boundary": "^2.0.0" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-3.2.0.tgz", + "integrity": "sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=14.18" + }, + "funding": { + "url": "https://github.com/chalk/supports-hyperlinks?sponsor=1" + } + }, + "node_modules/table": { + "version": "6.9.0", + "resolved": "https://registry.npmjs.org/table/-/table-6.9.0.tgz", + "integrity": "sha512-9kY+CygyYM6j02t5YFHbNz2FN5QmYGv9zAjVp4lCDjlCw7amdckXlEt/bjMhUIfj4ThGRE4gCUH5+yGnNuPo5A==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "ajv": "^8.0.1", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/table/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/table/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/terminal-link": { + "version": "4.0.0", + 
"resolved": "https://registry.npmjs.org/terminal-link/-/terminal-link-4.0.0.tgz", + "integrity": "sha512-lk+vH+MccxNqgVqSnkMVKx4VLJfnLjDBGzH16JVZjKE2DoxP57s6/vt6JmXV5I3jBcfGrxNrYtC+mPtU7WJztA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^7.0.0", + "supports-hyperlinks": "^3.2.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/textextensions": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/textextensions/-/textextensions-6.11.0.tgz", + "integrity": "sha512-tXJwSr9355kFJI3lbCkPpUH5cP8/M0GGy2xLO34aZCjMXBaK3SoPnZwr/oWmo1FdCnELcs4npdCIOFtq9W3ruQ==", + "dev": true, + "license": "Artistic-2.0", + "dependencies": { + "editions": "^6.21.0" + }, + "engines": { + "node": ">=4" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unicorn-magic": { + "version": 
"0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/version-range": { + "version": "4.15.0", + "resolved": "https://registry.npmjs.org/version-range/-/version-range-4.15.0.tgz", + "integrity": "sha512-Ck0EJbAGxHwprkzFO966t4/5QkRuzh+/I1RxhLgUKKwEn+Cd8NwM60mE3AqBZg5gYODoXW0EFsQvbZjRlvdqbg==", + "dev": true, + "license": "Artistic-2.0", + "engines": { + "node": ">=4" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", diff --git a/package.json b/package.json index 062ee49..5889f13 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "description": "SimSteward SimHub plugin and local observability stack", "scripts": { "env:run": "dotenv -e .env --", - "obs:up": "docker compose -f observability/local/docker-compose.yml up -d", + "obs:up": "docker compose --env-file observability/local/.env.observability.local -f observability/local/docker-compose.yml up -d", "obs:up:env": "docker compose --env-file observability/local/.env.observability.local -f observability/local/docker-compose.yml up -d", "obs:down": "docker compose -f observability/local/docker-compose.yml down", "obs:ps": "docker compose -f 
observability/local/docker-compose.yml ps", @@ -12,9 +12,17 @@ "obs:poll:grafana": "pwsh -NoProfile -File scripts/poll-loki.ps1 -ViaGrafana", "obs:poll:grafana:env": "dotenv -e .env -- pwsh -NoProfile -File scripts/poll-loki.ps1 -ViaGrafana", "obs:wipe": "pwsh -NoProfile -File scripts/obs-wipe-local-data.ps1", - "loki:query": "dotenv -e .env -- node scripts/query-loki-once.mjs" + "obs:poll:prometheus": "pwsh -NoProfile -File scripts/poll-prometheus.ps1", + "obs:ports": "pwsh -NoProfile -File scripts/check-obs-ports.ps1", + "loki:query": "dotenv -e .env -- node scripts/query-loki-once.mjs", + "secrets:lint": "secretlint \"**/*\"", + "secrets:gitleaks": "pwsh -NoProfile -File scripts/run-gitleaks-docker.ps1", + "contextstream:ingest": "pwsh -NoProfile -File scripts/contextstream-ingest.ps1", + "contextstream:ingest:force": "pwsh -NoProfile -File scripts/contextstream-ingest.ps1 -Force" }, "devDependencies": { - "dotenv-cli": "^7.4.4" + "@secretlint/secretlint-rule-preset-recommend": "^11.4.0", + "dotenv-cli": "^7.4.4", + "secretlint": "^11.4.0" } } diff --git a/scripts/check-obs-ports.ps1 b/scripts/check-obs-ports.ps1 new file mode 100644 index 0000000..ef6546d --- /dev/null +++ b/scripts/check-obs-ports.ps1 @@ -0,0 +1,53 @@ +# List LISTENING sockets on ports used by Sim Steward observability compose + SimHub plugin defaults. +# Run from anywhere: pwsh -NoProfile -File scripts/check-obs-ports.ps1 +# Requires: Windows PowerShell 5+ or pwsh (Get-NetTCPConnection). 
+ +$ErrorActionPreference = "SilentlyContinue" + +$ports = @( + @{ Port = 3000; Name = "Grafana (compose)" }, + @{ Port = 3100; Name = "Loki HTTP (compose)" }, + @{ Port = 3500; Name = "loki-gateway nginx (compose)" }, + @{ Port = 4317; Name = "OTLP gRPC (otel-collector host map)" }, + @{ Port = 4318; Name = "OTLP HTTP (otel-collector host map)" }, + @{ Port = 8080; Name = "data-api (compose)" }, + @{ Port = 8888; Name = "SimHub built-in HTTP (dashboard)" }, + @{ Port = 8889; Name = "Often SimHubWPF or other apps (compose uses host 18889 instead)" }, + @{ Port = 9090; Name = "Prometheus (compose)" }, + @{ Port = 13133; Name = "OTel collector health_check (compose)" }, + @{ Port = 18889; Name = "Collector /metrics on host (mapped to container 8889)" }, + @{ Port = 19847; Name = "Sim Steward WebSocket (SIMSTEWARD_WS_PORT default)" } +) + +Write-Host "Checking LISTENING TCP ports (Sim Steward / SimHub-related)...`n" + +$listen = Get-NetTCPConnection -State Listen -ErrorAction SilentlyContinue +if (-not $listen) { + Write-Host "Get-NetTCPConnection returned nothing (need admin or older OS). Falling back to netstat." + netstat -ano | findstr LISTENING + exit 0 +} + +foreach ($row in $ports) { + $p = $row.Port + $hits = $listen | Where-Object { $_.LocalPort -eq $p } + if ($hits) { + Write-Host "=== PORT $p - $($row.Name) ===" + foreach ($h in $hits | Select-Object -Unique LocalAddress, LocalPort, OwningProcess) { + $proc = Get-Process -Id $h.OwningProcess -ErrorAction SilentlyContinue + $pn = if ($proc) { $proc.ProcessName } else { "?" } + Write-Host (" {0}:{1} PID {2} {3}" -f $h.LocalAddress, $h.LocalPort, $h.OwningProcess, $pn) + } + Write-Host "" + } +} + +$any = $false +foreach ($row in $ports) { + if ($listen | Where-Object { $_.LocalPort -eq $row.Port }) { $any = $true; break } +} +if (-not $any) { + Write-Host 'None of the listed ports show LISTENING (stack likely down, or SimHub closed).' +} +Write-Host "" +Write-Host "PASS: Port scan complete." 
diff --git a/scripts/contextstream-ingest.ps1 b/scripts/contextstream-ingest.ps1 index 936cf00..07df58b 100644 --- a/scripts/contextstream-ingest.ps1 +++ b/scripts/contextstream-ingest.ps1 @@ -6,6 +6,14 @@ Runs contextstream-mcp.exe ingest with credentials from .env via envmcp. Use from a normal terminal (or this script uses cmd.exe so ingest sees a console). + When to use -Force: after .cursorignore changes, large refactors, new first-party + paths under src/, or when ContextStream search/graph looks stale. Equivalent npm: + npm run contextstream:ingest:force + + Note: .cursorignore affects Cursor IDE indexing; ContextStream server-side ingest + may apply separate include/exclude rules (see product docs). For MCP-driven refresh + use project(ingest_local, force=true). + .PARAMETER Force Pass --force to re-upload all files. #> diff --git a/scripts/grafana-bootstrap.ps1 b/scripts/grafana-bootstrap.ps1 index c9cbe68..6d9e943 100644 --- a/scripts/grafana-bootstrap.ps1 +++ b/scripts/grafana-bootstrap.ps1 @@ -3,15 +3,13 @@ # Auth: GRAFANA_ADMIN_USER_OVERRIDE / GRAFANA_ADMIN_PASSWORD_OVERRIDE, else repo .env GRAFANA_ADMIN_USER / GRAFANA_ADMIN_PASSWORD, else admin/admin. $ErrorActionPreference = "Stop" $repoRoot = $PSScriptRoot | Split-Path -Parent -$envFile = Join-Path $repoRoot ".env" -if (Test-Path $envFile) { - Get-Content $envFile | ForEach-Object { - if ($_ -match '^\s*([^#][^=]*)=(.*)$') { - $name = $Matches[1].Trim() - $value = $Matches[2].Trim().Trim('"') - [System.Environment]::SetEnvironmentVariable($name, $value, "Process") - } - } +$loadDotenv = Join-Path $repoRoot "scripts\load-dotenv.ps1" +if (Test-Path $loadDotenv) { + . $loadDotenv + Import-DotEnv @( + (Join-Path $repoRoot ".env"), + (Join-Path $repoRoot "observability\local\.env.observability.local") + ) } $GrafanaUrl = "http://localhost:3000" @@ -71,7 +69,7 @@ Write-Host "API Token generated." # 3. 
Update .env file if (-not (Test-Path $envFile)) { - Write-Host "FAIL: .env not found at $envFile — copy from .env.example first." + Write-Host "FAIL: .env not found at $envFile - copy from .env.example first." exit 1 } $envContent = Get-Content $envFile -Raw diff --git a/scripts/load-dotenv.ps1 b/scripts/load-dotenv.ps1 new file mode 100644 index 0000000..e670db0 --- /dev/null +++ b/scripts/load-dotenv.ps1 @@ -0,0 +1,60 @@ +# Load KEY=VALUE pairs from .env-style files into the current process environment. +# Later files override earlier keys. Skips comments and blank lines. +# Usage (repo root): . .\scripts\load-dotenv.ps1 +# Import-DotEnv @((Join-Path $PSScriptRoot "..\.env")) +# Optional second file (e.g. LOKI_PUSH_TOKEN from Docker stack): +# Import-DotEnv @($envPath, (Join-Path $RepoRoot "observability\local\.env.observability.local")) + +function Import-DotEnv { + param( + [Parameter(Mandatory)] + [string[]]$Path + ) + foreach ($file in $Path) { + if ([string]::IsNullOrWhiteSpace($file) -or -not (Test-Path -LiteralPath $file)) { continue } + $content = Get-Content -LiteralPath $file -Raw -ErrorAction Stop + foreach ($rawLine in $content -split "`r?`n") { + $line = $rawLine.Trim() + if (-not $line -or $line.StartsWith("#")) { continue } + if ($line.StartsWith("export ", [System.StringComparison]::OrdinalIgnoreCase)) { + $line = $line.Substring(7).Trim() + } + $eq = $line.IndexOf("=") + if ($eq -le 0) { continue } + $key = $line.Substring(0, $eq).Trim() + if (-not $key) { continue } + $val = $line.Substring($eq + 1).Trim() + # Trailing comment: KEY=value # note (space before #) + $spHash = $val.IndexOf(" #") + if ($spHash -ge 0) { $val = $val.Substring(0, $spHash).Trim() } + if ($val.Length -ge 2) { + if (($val.StartsWith('"') -and $val.EndsWith('"')) -or ($val.StartsWith("'") -and $val.EndsWith("'"))) { + $val = $val.Substring(1, $val.Length - 2) + } + } + Set-Item -Path "Env:$key" -Value $val + } + } +} + +# Primary env file: use -EnvFile path (absolute or 
repo-relative) instead of default repo .env; then merge observability/local/.env.observability.local if present. +function Resolve-SimStewardEnvPaths { + param( + [Parameter(Mandatory)][string]$RepoRoot, + [string]$EnvFile = '' + ) + $list = [System.Collections.Generic.List[string]]::new() + if (-not [string]::IsNullOrWhiteSpace($EnvFile)) { + $resolved = if ([System.IO.Path]::IsPathRooted($EnvFile)) { $EnvFile } else { Join-Path $RepoRoot $EnvFile } + if (-not (Test-Path -LiteralPath $resolved)) { + throw "Env file not found: $resolved" + } + [void]$list.Add($resolved) + } else { + $def = Join-Path $RepoRoot '.env' + if (Test-Path -LiteralPath $def) { [void]$list.Add($def) } + } + $obs = Join-Path $RepoRoot 'observability\local\.env.observability.local' + if (Test-Path -LiteralPath $obs) { [void]$list.Add($obs) } + return ,$list.ToArray() +} diff --git a/scripts/obs-wipe-local-data.ps1 b/scripts/obs-wipe-local-data.ps1 index d511c36..e8ad5ba 100644 --- a/scripts/obs-wipe-local-data.ps1 +++ b/scripts/obs-wipe-local-data.ps1 @@ -1,4 +1,4 @@ -# Wipe persisted data for the local observability stack (Loki chunks/WAL, optional Grafana, sample logs). +# Wipe persisted data for the local observability stack (Loki chunks/WAL, Prometheus TSDB, optional Grafana, sample logs). # Does NOT remove Loki config, datasource provisioning, gateway tokens in .env, or compose services. # Usage (repo root): .\scripts\obs-wipe-local-data.ps1 -Force # Optional: -Grafana -SampleLogs or -All (both optional dirs with -Force). 
@@ -66,6 +66,9 @@ finally { Write-Host "Wiping Loki data under: $(Join-Path $base 'loki')" Clear-DirectoryContents (Join-Path $base "loki") +Write-Host "Wiping Prometheus TSDB under: $(Join-Path $base 'prometheus')" +Clear-DirectoryContents (Join-Path $base "prometheus") + if ($Grafana) { Write-Host "Wiping Grafana lib under: $(Join-Path $base 'grafana')" Clear-DirectoryContents (Join-Path $base "grafana") @@ -79,5 +82,5 @@ if ($SampleLogs) { Write-Host "PASS: Local observability data wiped. Restart with npm run obs:up or obs:up:env." if ($Grafana) { - Write-Host "Note: Grafana volume cleared — re-run scripts/grafana-bootstrap.ps1 if you use GRAFANA_API_TOKEN." + Write-Host "Note: Grafana volume cleared - re-run scripts/grafana-bootstrap.ps1 if you use GRAFANA_API_TOKEN." } diff --git a/scripts/poll-loki.ps1 b/scripts/poll-loki.ps1 index 293b068..1e81e7f 100644 --- a/scripts/poll-loki.ps1 +++ b/scripts/poll-loki.ps1 @@ -8,7 +8,7 @@ param( [string]$LokiUrl = "", - [string]$Query = '{app="sim-steward"} | json | level != "DEBUG"', + [string]$Query = '{app=~"sim-steward|claude-dev-logging"} | json | level != "DEBUG"', [int]$IntervalSeconds = 2, [int]$LookbackSeconds = 120, [switch]$ViaGrafana @@ -16,16 +16,13 @@ param( $ErrorActionPreference = "Stop" $repoRoot = $PSScriptRoot | Split-Path -Parent -$envFile = Join-Path $repoRoot ".env" - -if (Test-Path $envFile) { - Get-Content $envFile | ForEach-Object { - if ($_ -match '^\s*([^#][^=]*)=(.*)$') { - $name = $Matches[1].Trim() - $value = $Matches[2].Trim().Trim('"') - [System.Environment]::SetEnvironmentVariable($name, $value, "Process") - } - } +$loadDotenv = Join-Path $repoRoot "scripts\load-dotenv.ps1" +if (Test-Path $loadDotenv) { + . 
$loadDotenv + Import-DotEnv @( + (Join-Path $repoRoot ".env"), + (Join-Path $repoRoot "observability\local\.env.observability.local") + ) } $useGrafanaProxy = [bool]$ViaGrafana diff --git a/scripts/poll-prometheus.ps1 b/scripts/poll-prometheus.ps1 new file mode 100644 index 0000000..3151044 --- /dev/null +++ b/scripts/poll-prometheus.ps1 @@ -0,0 +1,19 @@ +# Smoke-query local Prometheus (same TSDB as Grafana datasource prometheus_local). +# Usage (repo root): .\scripts\poll-prometheus.ps1 [-Query "up"] +param( + [string]$BaseUrl = "http://127.0.0.1:9090", + [string]$Query = "up" +) + +$ErrorActionPreference = "Stop" +$enc = [System.Uri]::EscapeDataString($Query) +$uri = "$BaseUrl/api/v1/query?query=$enc" +Write-Host "GET $uri" +try { + $r = Invoke-RestMethod -Uri $uri -Method Get + $r | ConvertTo-Json -Depth 6 + Write-Host "PASS: Prometheus query returned." +} catch { + Write-Host "FAIL: $_" + exit 1 +} diff --git a/scripts/publish-dashboards.ps1 b/scripts/publish-dashboards.ps1 index eb656df..089bd0e 100644 --- a/scripts/publish-dashboards.ps1 +++ b/scripts/publish-dashboards.ps1 @@ -1,23 +1,24 @@ # Publish Grafana dashboards from JSON files to the Grafana API. -# Usage: .\scripts\publish-dashboards.ps1 +# Usage (any cwd): .\scripts\publish-dashboards.ps1 $ErrorActionPreference = "Stop" +$repoRoot = $PSScriptRoot | Split-Path -Parent $GrafanaUrl = "http://localhost:3000" -$DashboardDir = "observability/local/grafana/provisioning/dashboards" +$DashboardDir = Join-Path $repoRoot "observability\local\grafana\provisioning\dashboards" -# Load .env file to get GRAFANA_API_TOKEN -$envFile = ".env" -if (-not (Test-Path $envFile)) { - Write-Host "FAIL: .env file not found. Run grafana-bootstrap.ps1 first." +$loadDotenv = Join-Path $repoRoot "scripts\load-dotenv.ps1" +if (-not (Test-Path (Join-Path $repoRoot ".env"))) { + Write-Host "FAIL: .env not found at $(Join-Path $repoRoot '.env'). Run grafana-bootstrap.ps1 first." 
exit 1 } -Get-Content $envFile | Foreach-Object { - if ($_ -match '^(?<name>[^=]+)=(?<value>.*)') { - $name = $Matches.name.Trim() - $value = $Matches.value.Trim().Trim('"') - # Set environment variable for the current process - [System.Environment]::SetEnvironmentVariable($name, $value, "Process") - } +if (Test-Path $loadDotenv) { + . $loadDotenv + Import-DotEnv @( + (Join-Path $repoRoot ".env"), + (Join-Path $repoRoot "observability\local\.env.observability.local") + ) } +$gu = [System.Environment]::GetEnvironmentVariable("GRAFANA_URL", "Process") +if ($gu) { $GrafanaUrl = $gu.Trim().TrimEnd('/') } $grafanaApiToken = [System.Environment]::GetEnvironmentVariable("GRAFANA_API_TOKEN", "Process") @@ -33,7 +34,7 @@ $headers = @{ $dashboardFiles = @(Get-ChildItem -Path $DashboardDir -Filter "*.json" -ErrorAction SilentlyContinue) if ($dashboardFiles.Count -eq 0) { - Write-Host "PASS: No dashboard JSON in $DashboardDir — nothing to publish." + Write-Host "PASS: No dashboard JSON in $DashboardDir - nothing to publish." exit 0 } diff --git a/scripts/run-gitleaks-docker.ps1 b/scripts/run-gitleaks-docker.ps1 new file mode 100644 index 0000000..1ee40a8 --- /dev/null +++ b/scripts/run-gitleaks-docker.ps1 @@ -0,0 +1,6 @@ +# Run official Gitleaks image against full git history (mount repo at /repo). +# Usage (repo root): pwsh -NoProfile -File scripts/run-gitleaks-docker.ps1 +$ErrorActionPreference = "Stop" +$repoRoot = Split-Path -Parent $PSScriptRoot +$abs = (Resolve-Path $repoRoot).Path +docker run --rm -v "${abs}:/repo" zricethezav/gitleaks:latest detect --source=/repo --verbose diff --git a/scripts/run-simhub-local-observability.ps1 b/scripts/run-simhub-local-observability.ps1 index d326d7a..bd0235d 100644 --- a/scripts/run-simhub-local-observability.ps1 +++ b/scripts/run-simhub-local-observability.ps1 @@ -1,40 +1,35 @@ # Start SimHub with env vars set for local Loki (plugin pushes to local Grafana stack). 
-# Run from plugin repo root: .\scripts\run-simhub-local-observability.ps1 +# Run from plugin repo root: .\scripts\run-simhub-local-observability.ps1 [-EnvFile path\to\secrets.env] # See docs/observability-local.md and docs/GRAFANA-LOGGING.md. +param([string]$EnvFile = "") + $ErrorActionPreference = "Stop" $ScriptDir = $PSScriptRoot $PluginRoot = (Resolve-Path (Join-Path $ScriptDir "..")).Path -# Load .env from repo root if present (KEY=VALUE; skip comments and empty lines) -$envFile = Join-Path $PluginRoot ".env" -if (Test-Path $envFile) { - Get-Content $envFile -Raw | ForEach-Object { - $_ -split "`n" | ForEach-Object { - $line = $_.Trim() - if ($line -and -not $line.StartsWith("#")) { - $idx = $line.IndexOf("=") - if ($idx -gt 0) { - $key = $line.Substring(0, $idx).Trim() - $val = $line.Substring($idx + 1).Trim() - if ($val.Length -ge 2 -and $val.StartsWith('"') -and $val.EndsWith('"')) { $val = $val.Substring(1, $val.Length - 2) } - if ($key -and $key -notmatch "^\s*#") { Set-Item -Path "Env:$key" -Value $val } - } - } - } +# Load .env (+ optional Docker stack token file) - same as deploy.ps1 +$loadDotenv = Join-Path $ScriptDir "load-dotenv.ps1" +if (Test-Path $loadDotenv) { + . 
$loadDotenv + Import-DotEnv (Resolve-SimStewardEnvPaths -RepoRoot $PluginRoot -EnvFile $EnvFile) + if (-not [string]::IsNullOrWhiteSpace($EnvFile)) { + Write-Host "Loaded env from -EnvFile $EnvFile (+ observability local merge if present)" + } else { + Write-Host "Loaded env from .env / .env.observability.local (if present)" } - Write-Host "Loaded env from: $envFile" } # Debug log for agent sessions (writes to workspace so we can read after run) $env:SIMSTEWARD_DEBUG_LOG_PATH = Join-Path $PluginRoot "debug-e2bb5f.log" -# Force local Loki so plugin pushes to local Docker stack -$env:SIMSTEWARD_LOKI_URL = "http://localhost:3100" -$env:SIMSTEWARD_LOKI_USER = "" -$env:SIMSTEWARD_LOKI_TOKEN = "" -$env:SIMSTEWARD_LOG_ENV = "local" -if (-not $env:SIMSTEWARD_LOG_DEBUG) { $env:SIMSTEWARD_LOG_DEBUG = "1" } +# Defaults for local stack only when not set in .env +if ([string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOKI_URL)) { $env:SIMSTEWARD_LOKI_URL = "http://localhost:3100" } +if ([string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOG_ENV)) { $env:SIMSTEWARD_LOG_ENV = "local" } +if ([string]::IsNullOrWhiteSpace($env:SIMSTEWARD_LOG_DEBUG)) { $env:SIMSTEWARD_LOG_DEBUG = "1" } + +# OTLP → OpenTelemetry Collector (see docs/observability-local.md). Override in .env if needed. +if (-not $env:OTEL_EXPORTER_OTLP_ENDPOINT) { $env:OTEL_EXPORTER_OTLP_ENDPOINT = "http://127.0.0.1:4317" } # Resolve SimHub path (same logic as deploy.ps1) $SimHubPath = $null @@ -59,6 +54,6 @@ if (-not (Test-Path $SimHubExe)) { Write-Error "SimHub not found at: $SimHubExe. Set SIMHUB_PATH to your SimHub folder." 
} -Write-Host "Starting SimHub with local Loki (SIMSTEWARD_LOKI_URL=$env:SIMSTEWARD_LOKI_URL, SIMSTEWARD_LOG_ENV=$env:SIMSTEWARD_LOG_ENV)" +Write-Host "Starting SimHub with local Loki + OTLP metrics (SIMSTEWARD_LOKI_URL=$env:SIMSTEWARD_LOKI_URL, OTEL_EXPORTER_OTLP_ENDPOINT=$env:OTEL_EXPORTER_OTLP_ENDPOINT, SIMSTEWARD_LOG_ENV=$env:SIMSTEWARD_LOG_ENV)" Write-Host "SimHub: $SimHubExe" & $SimHubExe diff --git a/scripts/seed-and-validate-loki.ps1 b/scripts/seed-and-validate-loki.ps1 index 64908e7..c7c3b6d 100644 --- a/scripts/seed-and-validate-loki.ps1 +++ b/scripts/seed-and-validate-loki.ps1 @@ -15,7 +15,7 @@ $jsonlPath = Join-Path $sampleLogsDir "plugin-structured.jsonl" if (-not (Test-Path $sampleLogsDir)) { New-Item -ItemType Directory -Force -Path $sampleLogsDir | Out-Null } $ts = [DateTime]::UtcNow.ToString("yyyy-MM-ddTHH:mm:ss.fffZ") -$line = "{`"level`":`"INFO`",`"message`":`"Seed line at $ts — pipeline check`",`"timestamp`":`"$ts`",`"component`":`"simhub-plugin`",`"event`":`"pipeline_test`",`"domain`":`"lifecycle`"}" +$line = "{`"level`":`"INFO`",`"message`":`"Seed line at $ts - pipeline check`",`"timestamp`":`"$ts`",`"component`":`"simhub-plugin`",`"event`":`"pipeline_test`",`"domain`":`"lifecycle`"}" Add-Content -Path $jsonlPath -Value $line -Encoding UTF8 Write-Host "Appended 1 line to $jsonlPath" diff --git a/scripts/send-deploy-loki-marker.ps1 b/scripts/send-deploy-loki-marker.ps1 new file mode 100644 index 0000000..a752d1c --- /dev/null +++ b/scripts/send-deploy-loki-marker.ps1 @@ -0,0 +1,82 @@ +# Push a single structured line to Loki marking deploy.ps1 completion (optional). +# Skips silently if SIMSTEWARD_LOKI_URL is unset. Uses same 4-label schema as the plugin. +# See docs/GRAFANA-LOGGING.md (event deploy_marker). 
+ +param( + [ValidateSet('ok', 'failed')] + [string]$Status = 'ok', + [switch]$PostDeployWarning, + [string]$Detail = '', + [string]$EnvFile = '' +) + +$ErrorActionPreference = 'Stop' + +$repoRoot = (Resolve-Path (Join-Path $PSScriptRoot '..')).Path +$loadDotenv = Join-Path $repoRoot 'scripts\load-dotenv.ps1' +if (Test-Path $loadDotenv) { + . $loadDotenv + Import-DotEnv (Resolve-SimStewardEnvPaths -RepoRoot $repoRoot -EnvFile $EnvFile) +} + +$url = $env:SIMSTEWARD_LOKI_URL +if ([string]::IsNullOrWhiteSpace($url)) { return } + +$envName = $env:SIMSTEWARD_LOG_ENV +if ([string]::IsNullOrWhiteSpace($envName)) { $envName = 'local' } + +$tsNs = [DateTimeOffset]::UtcNow.ToUnixTimeMilliseconds() * 1000000 + +$bodyObj = [ordered]@{ + event = 'deploy_marker' + deploy_status = $Status + post_deploy_warn = $PostDeployWarning.IsPresent + detail = $Detail + machine = $env:COMPUTERNAME + simhub_path = $(if ($env:SIMHUB_PATH) { $env:SIMHUB_PATH } else { '' }) +} +$line = ($bodyObj | ConvertTo-Json -Compress -Depth 5) + +$lvl = if ($Status -eq 'failed') { 'ERROR' } elseif ($PostDeployWarning) { 'WARN' } else { 'INFO' } +$streamObj = [ordered]@{ + stream = @{ + app = 'sim-steward' + env = $envName + component = 'deploy' + level = $lvl + } + # Loki expects values as [[tsNs, line], ...] - leading comma forces a nested array in PowerShell. 
+ values = @( , @( [string]$tsNs, $line ) ) +} +$root = [ordered]@{ streams = @( $streamObj ) } +$payload = $root | ConvertTo-Json -Depth 20 -Compress + +$pushUri = $url.TrimEnd('/') + '/loki/api/v1/push' +$headers = @{ 'Content-Type' = 'application/json' } +$lokiUser = $env:SIMSTEWARD_LOKI_USER +$lokiPass = $env:SIMSTEWARD_LOKI_TOKEN +$gatewayToken = $env:LOKI_PUSH_TOKEN +if ($url -match 'grafana\.net' -and ([string]::IsNullOrWhiteSpace($lokiUser) -or [string]::IsNullOrWhiteSpace($lokiPass))) { + Write-Host "send-deploy-loki-marker: warn - SIMSTEWARD_LOKI_URL looks like Grafana Cloud but SIMSTEWARD_LOKI_USER / SIMSTEWARD_LOKI_TOKEN missing (Basic auth required)." +} +# Grafana Cloud Loki: Basic (instance user id + API token). Local loki-gateway: Bearer LOKI_PUSH_TOKEN. Local Loki :3100: often no auth. +if (-not [string]::IsNullOrWhiteSpace($lokiUser) -and -not [string]::IsNullOrWhiteSpace($lokiPass)) { + $pair = [Text.Encoding]::ASCII.GetBytes(("{0}:{1}" -f $lokiUser.Trim(), $lokiPass.Trim())) + $headers['Authorization'] = 'Basic ' + [Convert]::ToBase64String($pair) +} elseif (-not [string]::IsNullOrWhiteSpace($gatewayToken)) { + $headers['Authorization'] = 'Bearer ' + $gatewayToken.Trim() +} + +try { + Invoke-RestMethod -Uri $pushUri -Method Post -Headers $headers -Body $payload -TimeoutSec 15 | Out-Null + $hostOnly = try { ([Uri]$url.Trim()).Host } catch { $url } + Write-Host "send-deploy-loki-marker: pushed OK ($hostOnly)" +} catch { + $code = '' + try { + $resp = $_.Exception.Response + if ($null -ne $resp -and $resp.StatusCode) { $code = ' HTTP {0}' -f [int]$resp.StatusCode } + } catch { } + Write-Host "send-deploy-loki-marker: push failed (non-fatal):$code $($_.Exception.Message)" + Write-Host " Auth: Grafana Cloud -> set SIMSTEWARD_LOKI_USER + SIMSTEWARD_LOKI_TOKEN; local gateway :3500 -> LOKI_PUSH_TOKEN; local :3100 -> leave auth unset." 
+} diff --git a/scripts/validate-grafana-logs.ps1 b/scripts/validate-grafana-logs.ps1 index a5e3d4a..e91bf95 100644 --- a/scripts/validate-grafana-logs.ps1 +++ b/scripts/validate-grafana-logs.ps1 @@ -5,24 +5,21 @@ param( [string]$DebugLogPath = "debug-2291d4.log", - [string]$Query = '{app="sim-steward"}', + [string]$Query = '{app=~"sim-steward|claude-dev-logging"}', [int]$LookbackSeconds = 7200 ) $ErrorActionPreference = "Stop" $repoRoot = $PSScriptRoot | Split-Path -Parent -$envFile = Join-Path $repoRoot ".env" $debugLog = if ([System.IO.Path]::IsPathRooted($DebugLogPath)) { $DebugLogPath } else { Join-Path $repoRoot $DebugLogPath } -# Load .env -if (Test-Path $envFile) { - Get-Content $envFile | ForEach-Object { - if ($_ -match '^\s*([^#][^=]*)=(.*)$') { - $name = $Matches[1].Trim() - $value = $Matches[2].Trim().Trim('"') - [System.Environment]::SetEnvironmentVariable($name, $value, "Process") - } - } +$loadDotenv = Join-Path $repoRoot "scripts\load-dotenv.ps1" +if (Test-Path $loadDotenv) { + . $loadDotenv + Import-DotEnv @( + (Join-Path $repoRoot ".env"), + (Join-Path $repoRoot "observability\local\.env.observability.local") + ) } $lokiUrl = [System.Environment]::GetEnvironmentVariable("LOKI_QUERY_URL", "Process") @@ -95,5 +92,5 @@ if ($d.error) { $msg += ", error=$($d.error)" } Write-Host $msg if ($d.status -eq "ok" -and $d.totalLines -eq 0 -and $lokiUrl -match "localhost") { Write-Host "" - Write-Host "No plugin logs in Loki. Confirm plugin-structured.jsonl is ingested to Loki (your shipper or Grafana Cloud). SimHub data dir e.g. $env:LOCALAPPDATA\SimHubWpf\PluginsData\SimSteward — see docs/observability-local.md." -ForegroundColor Yellow + Write-Host "No plugin logs in Loki. Confirm plugin-structured.jsonl is ingested to Loki (your shipper or Grafana Cloud). SimHub data dir e.g. $env:LOCALAPPDATA\SimHubWpf\PluginsData\SimSteward - see docs/observability-local.md." 
-ForegroundColor Yellow } diff --git a/scripts/watch-deploy.ps1 b/scripts/watch-deploy.ps1 index 22135fe..632379d 100644 --- a/scripts/watch-deploy.ps1 +++ b/scripts/watch-deploy.ps1 @@ -1,5 +1,6 @@ param( - [int]$DebounceMs = 750 + [int]$DebounceMs = 750, + [string]$EnvFile = "" ) $ErrorActionPreference = 'Stop' @@ -54,10 +55,14 @@ function Invoke-Deploy { $deployInProgress = $true try { Write-Host '[watch-deploy] Starting deploy (SIMHUB_SKIP_LAUNCH=1)...' - $env:SIMHUB_SKIP_LAUNCH = '1' + $env:SIMHUB_SKIP_LAUNCH = '0' Push-Location $RepoRoot try { - & pwsh -NoProfile -File $DeployScript + if ([string]::IsNullOrWhiteSpace($EnvFile)) { + & pwsh -NoProfile -File $DeployScript + } else { + & pwsh -NoProfile -File $DeployScript -EnvFile $EnvFile + } } catch { Write-Host "[watch-deploy] Deploy failed: $($_.Exception.Message)" } finally { diff --git a/src/SimSteward.Dashboard/data-capture-suite.html b/src/SimSteward.Dashboard/data-capture-suite.html new file mode 100644 index 0000000..2f70c16 --- /dev/null +++ b/src/SimSteward.Dashboard/data-capture-suite.html @@ -0,0 +1,1375 @@ + + + + + +Sim Steward — SDK Data Capture Suite + + + + + +
+ + + + + +
+
1 Signals
+
+ WS + Plugin + SimHub + Grafana + Replay + Full replay + +
+
+ + +
+
2 Pre-test Conditions
+
+ Replay scope: + + + + +
+
+
+
+ Preflight ID: +  ·  Level: 0/3 +
+
+ + +
+
3 Test Selection
+
+
+
+ + +
+
4 Execute
+
+ + +
+
+
+
+ Phase: idle +  ·  Step: +  ·  Elapsed: +  ·  Run ID: +
+
+
+ + +
+
5 Results
+
+ Pending + Running + Emitted + Tentative pass + Tentative fail + Found + Pass + Fail + Skip +
+ + +
+ +
+ + +
+
6 Grafana Confirmation
+
+ + ⬡ Open in Grafana + +
+
+
+
+ + + + diff --git a/src/SimSteward.Dashboard/index.html b/src/SimSteward.Dashboard/index.html index 189292e..13a8e2b 100644 --- a/src/SimSteward.Dashboard/index.html +++ b/src/SimSteward.Dashboard/index.html @@ -475,6 +475,16 @@ ::-webkit-scrollbar-track { background: transparent; } ::-webkit-scrollbar-thumb { background: var(--border); border-radius: 2px; } + +
@@ -483,6 +493,7 @@
WAITING
—:—— + v—
iRacing @@ -490,6 +501,7 @@ SimHub
Replay index → + Capture suite → WS ○
@@ -764,15 +776,26 @@ renderInc(); renderStd(); log('health','INFO ','ws_connected','WebSocket connected to plugin at ' + url); + if (typeof Sentry !== 'undefined') { + Sentry.addBreadcrumb({ category: 'ws', message: 'WebSocket connected', level: 'info' }); + Sentry.setTag('ws_connected', true); + } }; ws.onclose = () => { ws = null; mockPaused = false; setWs('disconnected'); log('health','WARN ','ws_closed','Connection lost — retrying in 3 s'); + if (typeof Sentry !== 'undefined') { + Sentry.addBreadcrumb({ category: 'ws', message: 'WebSocket disconnected', level: 'warning' }); + Sentry.setTag('ws_connected', false); + } setTimeout(connectWs, 3000); }; ws.onerror = () => { ws = null; mockPaused = false; }; - ws.onmessage = e => { try { onMsg(JSON.parse(e.data)); } catch {} }; + ws.onmessage = e => { + try { onMsg(JSON.parse(e.data)); } + catch (err) { if (typeof Sentry !== 'undefined') Sentry.captureException(err); } + }; } catch { setWs('disconnected'); setTimeout(connectWs, 3000); @@ -787,6 +810,14 @@ /** Structured UI telemetry for Loki (plugin → plugin-structured.jsonl; ingestion outside plugin). */ function sendDashboardUiEvent({ element_id, event_type, message, value }) { + if (typeof Sentry !== 'undefined') { + Sentry.addBreadcrumb({ + category: 'ui.' + event_type, + message: message, + data: { element_id }, + level: 'info', + }); + } if (!ws || ws.readyState !== WebSocket.OPEN) return false; const o = { action: 'log', event: 'dashboard_ui_event', element_id, event_type, message }; if (value !== undefined && value !== null) o.value = value; @@ -822,6 +853,17 @@ // ── State update ───────────────────────────────────────── function onState(m) { const mode = m.pluginMode || 'Unknown'; + const pvEl = document.getElementById('plugin-version'); + if (pvEl) { + const pv = m.pluginVersion ? String(m.pluginVersion) : ''; + pvEl.textContent = pv ? ('v' + pv) : 'v—'; + pvEl.title = pv ? 
('Sim Steward plugin ' + pv) : 'Sim Steward plugin build'; + if (pv && typeof Sentry !== 'undefined') { + const client = Sentry.getClient(); + if (client && client.getOptions) client.getOptions().release = pv; + Sentry.setTag('plugin_version', pv); + } + } const pill = document.getElementById('mode-pill'); pill.className = 'mode-pill ' + (mode==='Replay'?'replay':'waiting'); document.getElementById('mode-text').textContent = mode.toUpperCase(); diff --git a/src/SimSteward.Plugin.Tests/DataCaptureSuiteLokiIntegrationTests.cs b/src/SimSteward.Plugin.Tests/DataCaptureSuiteLokiIntegrationTests.cs new file mode 100644 index 0000000..e6eea61 --- /dev/null +++ b/src/SimSteward.Plugin.Tests/DataCaptureSuiteLokiIntegrationTests.cs @@ -0,0 +1,1134 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net.Http; +using Newtonsoft.Json.Linq; +using SimSteward.Observability; +using Xunit; + +namespace SimSteward.Plugin.Tests +{ + /// + /// End-to-end integration tests that query Loki after a real Data Capture Suite run. + /// Requires iRacing open with a full race replay loaded, the suite run to completion, and Alloy to have ingested logs. + /// + /// Enable with env vars: + /// RUN_CAPTURE_SUITE_LOKI_ASSERT=1 (master gate — all tests skip when absent) + /// LOKI_QUERY_URL or SIMSTEWARD_LOKI_URL (Loki base URL, e.g. http://localhost:3100) + /// CAPTURE_SUITE_TEST_RUN_ID (optional GUID — narrows all queries to a specific run) + /// + public class DataCaptureSuiteLokiIntegrationTests + { + // ── Infrastructure ────────────────────────────────────────────────────── + + private const int FrameTolerance = 60; + + private static readonly string _masterGate = + Environment.GetEnvironmentVariable("RUN_CAPTURE_SUITE_LOKI_ASSERT"); + private static readonly string _baseUrl = + (Environment.GetEnvironmentVariable("LOKI_QUERY_URL") + ?? Environment.GetEnvironmentVariable("SIMSTEWARD_LOKI_URL") ?? 
"").TrimEnd('/'); + private static readonly string _testRunId = + Environment.GetEnvironmentVariable("CAPTURE_SUITE_TEST_RUN_ID") ?? ""; + + // Lazy cache for T0 lines — queried once, reused by many tests + private List _t0Cache; + + private void SkipIfDisabled() + { + Skip.IfNot(string.Equals(_masterGate, "1", StringComparison.Ordinal), + "Set RUN_CAPTURE_SUITE_LOKI_ASSERT=1 and SIMSTEWARD_LOKI_URL to enable."); + if (string.IsNullOrWhiteSpace(_baseUrl)) + Assert.Fail("RUN_CAPTURE_SUITE_LOKI_ASSERT=1 requires LOKI_QUERY_URL or SIMSTEWARD_LOKI_URL."); + } + + private List QueryLines(string eventName) + { + var runFilter = string.IsNullOrEmpty(_testRunId) + ? "" + : $" | test_run_id = \"{_testRunId}\""; + var logql = + $"{{app=\"sim-steward\", component=\"simhub-plugin\"}} | json | event = \"{eventName}\"{runFilter}"; + using var client = new HttpClient { Timeout = TimeSpan.FromSeconds(15) }; + if (!LokiQueryRangeClient.TryQueryRange(_baseUrl, logql, client, TimeSpan.FromMinutes(60), + out var lines, out var err)) + Assert.Fail($"Loki query failed for event={eventName}: {err}"); + var parsed = new List(); + foreach (var line in lines) + { + try { parsed.Add(JObject.Parse(line)); } + catch { /* ignore malformed */ } + } + return parsed; + } + + private List GetT0() => _t0Cache ??= QueryLines(DataCaptureSuiteConstants.EventGroundTruth) + .OrderBy(j => FieldInt(j, "incident_index") ?? 999).ToList(); + + private JObject T0ByIndex(int idx) => + GetT0().FirstOrDefault(j => FieldInt(j, "incident_index") == idx); + + private static string Field(JObject j, string name) => + j?[name]?.ToString(); + + private static int? FieldInt(JObject j, string name) + { + var s = Field(j, name); + return int.TryParse(s, out var v) ? v : (int?)null; + } + + private static double? FieldDouble(JObject j, string name) + { + var s = Field(j, name); + return double.TryParse(s, System.Globalization.NumberStyles.Any, + System.Globalization.CultureInfo.InvariantCulture, out var v) ? 
v : (double?)null; + } + + // Parses field as JArray or comma/semicolon-separated string → list of trimmed strings + private static List FieldArray(JObject j, string name) + { + var token = j?[name]; + if (token == null) return new List(); + if (token is JArray arr) return arr.Select(t => t.ToString()).ToList(); + var s = token.ToString(); + return s.Split(new[] { ',', ';', ' ' }, StringSplitOptions.RemoveEmptyEntries) + .Select(x => x.Trim()).ToList(); + } + + // Collect all events across all 13 types for common-field assertions + private List AllEvents() + { + var all = new List(); + foreach (var ev in new[] + { + DataCaptureSuiteConstants.EventSuiteStarted, + DataCaptureSuiteConstants.EventSuiteComplete, + DataCaptureSuiteConstants.EventGroundTruth, + DataCaptureSuiteConstants.EventSpeedSample, + DataCaptureSuiteConstants.EventVariableInventory, + DataCaptureSuiteConstants.EventPlayerSnapshot, + DataCaptureSuiteConstants.EventDriverRoster, + DataCaptureSuiteConstants.EventCameraSwitchDriver, + DataCaptureSuiteConstants.EventCameraViewSample, + DataCaptureSuiteConstants.EventCameraViewSummary, + DataCaptureSuiteConstants.EventSessionResults, + DataCaptureSuiteConstants.EventIncidentReseek, + DataCaptureSuiteConstants.EventFfSweepResult, + }) + all.AddRange(QueryLines(ev)); + return all; + } + + // ── Suite Lifecycle ────────────────────────────────────────────────────── + + [SkippableFact] + public void Loki_SuiteStarted_IsPresent() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventSuiteStarted); + Assert.True(lines.Count >= 1, + $"Expected sdk_capture_suite_started in Loki; got {lines.Count}."); + } + + [SkippableFact] + public void Loki_SuiteStarted_ExactlyOne() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventSuiteStarted); + Assert.Single(lines); + } + + [SkippableFact] + public void Loki_SuiteStarted_HasValidGuidTestRunId() + { + SkipIfDisabled(); + var lines = 
QueryLines(DataCaptureSuiteConstants.EventSuiteStarted); + Skip.If(lines.Count == 0, "No suite_started event — run the suite first."); + var id = Field(lines[0], "test_run_id"); + Assert.True(Guid.TryParse(id, out _), + $"test_run_id '{id}' is not a valid GUID."); + } + + [SkippableFact] + public void Loki_SuiteStarted_TotalStepsIsTen() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventSuiteStarted); + Skip.If(lines.Count == 0, "No suite_started event."); + Assert.Equal(10, FieldInt(lines[0], "total_steps") ?? 0); + } + + [SkippableFact] + public void Loki_SuiteComplete_IsPresent() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventSuiteComplete); + Assert.True(lines.Count >= 1, + $"Expected sdk_capture_suite_complete; got {lines.Count}."); + } + + [SkippableFact] + public void Loki_SuiteComplete_TestRunIdMatchesStarted() + { + SkipIfDisabled(); + var started = QueryLines(DataCaptureSuiteConstants.EventSuiteStarted); + var complete = QueryLines(DataCaptureSuiteConstants.EventSuiteComplete); + Skip.If(started.Count == 0 || complete.Count == 0, "Need both started and complete events."); + Assert.Equal(Field(started[0], "test_run_id"), Field(complete[0], "test_run_id")); + } + + // ── All-Events Common Fields ───────────────────────────────────────────── + + [SkippableFact] + public void Loki_AllThirteenEventTypesPresent() + { + SkipIfDisabled(); + var missing = new List(); + foreach (var ev in new[] + { + DataCaptureSuiteConstants.EventSuiteStarted, + DataCaptureSuiteConstants.EventSuiteComplete, + DataCaptureSuiteConstants.EventGroundTruth, + DataCaptureSuiteConstants.EventSpeedSample, + DataCaptureSuiteConstants.EventVariableInventory, + DataCaptureSuiteConstants.EventPlayerSnapshot, + DataCaptureSuiteConstants.EventDriverRoster, + DataCaptureSuiteConstants.EventCameraSwitchDriver, + DataCaptureSuiteConstants.EventCameraViewSample, + DataCaptureSuiteConstants.EventCameraViewSummary, + 
DataCaptureSuiteConstants.EventSessionResults, + DataCaptureSuiteConstants.EventIncidentReseek, + DataCaptureSuiteConstants.EventFfSweepResult, + }) + if (QueryLines(ev).Count == 0) missing.Add(ev); + Assert.True(missing.Count == 0, $"Missing events in Loki: {string.Join(", ", missing)}"); + } + + [SkippableFact] + public void Loki_AllEvents_HaveTestRunId() + { + SkipIfDisabled(); + var bad = AllEvents().Where(j => string.IsNullOrEmpty(Field(j, "test_run_id"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events missing test_run_id."); + } + + [SkippableFact] + public void Loki_AllEvents_TestRunIdIsConsistent() + { + SkipIfDisabled(); + var all = AllEvents(); + var ids = all.Select(j => Field(j, "test_run_id")).Distinct().ToList(); + Assert.True(ids.Count == 1, $"Expected 1 unique test_run_id; got {ids.Count}: {string.Join(", ", ids)}"); + } + + [SkippableFact] + public void Loki_AllEvents_TestRunIdIsValidGuid() + { + SkipIfDisabled(); + var bad = AllEvents() + .Where(j => !Guid.TryParse(Field(j, "test_run_id") ?? 
"", out _)) + .ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events have non-GUID test_run_id."); + } + + [SkippableFact] + public void Loki_AllEvents_HaveTestingFlag() + { + SkipIfDisabled(); + var bad = AllEvents().Where(j => Field(j, "testing") != "true").ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events missing testing=true."); + } + + [SkippableFact] + public void Loki_AllEvents_HaveDomainTest() + { + SkipIfDisabled(); + var bad = AllEvents().Where(j => Field(j, "domain") != "test").ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events missing domain=test."); + } + + [SkippableFact] + public void Loki_AllEvents_HaveSubsessionId() + { + SkipIfDisabled(); + var bad = AllEvents().Where(j => string.IsNullOrEmpty(Field(j, "subsession_id"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events missing subsession_id."); + } + + [SkippableFact] + public void Loki_AllEvents_SubsessionIdIsConsistent() + { + SkipIfDisabled(); + var ids = AllEvents().Select(j => Field(j, "subsession_id")).Distinct().ToList(); + Assert.True(ids.Count == 1, $"Expected 1 unique subsession_id; got {ids.Count}."); + } + + [SkippableFact] + public void Loki_AllEvents_HaveTrackDisplayName() + { + SkipIfDisabled(); + var bad = AllEvents().Where(j => string.IsNullOrEmpty(Field(j, "track_display_name"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events missing track_display_name."); + } + + [SkippableFact] + public void Loki_AllEvents_TrackDisplayNameIsConsistent() + { + SkipIfDisabled(); + var names = AllEvents().Select(j => Field(j, "track_display_name")).Distinct().ToList(); + Assert.True(names.Count == 1, $"Expected 1 unique track_display_name; got {names.Count}."); + } + + [SkippableFact] + public void Loki_AllEvents_HaveTestTag() + { + SkipIfDisabled(); + var bad = AllEvents().Where(j => string.IsNullOrEmpty(Field(j, "test_tag"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} events missing test_tag."); + } + + // ── T0 — Ground Truth 
──────────────────────────────────────────────────── + + [SkippableFact] + public void Loki_GroundTruth_HasThreeIncidents() + { + SkipIfDisabled(); + Assert.Equal(3, GetT0().Count); + } + + [SkippableFact] + public void Loki_GroundTruth_IncidentIndexesAreSequential() + { + SkipIfDisabled(); + var indexes = GetT0().Select(j => FieldInt(j, "incident_index")).OrderBy(x => x).ToList(); + Assert.Equal(new int?[] { 0, 1, 2 }, indexes); + } + + [SkippableFact] + public void Loki_GroundTruth_ReplayFramesAreUnique() + { + SkipIfDisabled(); + var frames = GetT0().Select(j => Field(j, "replay_frame")).ToList(); + Assert.Equal(frames.Count, frames.Distinct().Count()); + } + + [SkippableFact] + public void Loki_GroundTruth_ReplayFramesArePositive() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => (FieldInt(j, "replay_frame") ?? -1) <= 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents have replay_frame <= 0."); + } + + [SkippableFact] + public void Loki_GroundTruth_CarIdxIsValid() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => (FieldInt(j, "car_idx") ?? -1) < 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents have invalid car_idx."); + } + + [SkippableFact] + public void Loki_GroundTruth_CarIdxesAreDistinct() + { + SkipIfDisabled(); + var carIdxes = GetT0().Select(j => Field(j, "car_idx")).ToList(); + Assert.Equal(carIdxes.Count, carIdxes.Distinct().Count()); + } + + [SkippableFact] + public void Loki_GroundTruth_SessionTimeIsPositive() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => (FieldDouble(j, "replay_session_time_sec") ?? -1) <= 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents have session_time <= 0."); + } + + [SkippableFact] + public void Loki_GroundTruth_SessionTimesAreIncreasing() + { + SkipIfDisabled(); + var t0 = GetT0(); + Skip.If(t0.Count < 3, "Need 3 T0 events."); + var times = t0.Select(j => FieldDouble(j, "replay_session_time_sec") ?? 
0).ToList(); + Assert.True(times[0] < times[1] && times[1] < times[2], + $"Session times not increasing: {times[0]} {times[1]} {times[2]}"); + } + + [SkippableFact] + public void Loki_GroundTruth_LapDistPctInRange() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => + { + var v = FieldDouble(j, "lap_dist_pct"); + return v == null || v < 0.0 || v > 1.0; + }).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents have lap_dist_pct out of [0,1]."); + } + + [SkippableFact] + public void Loki_GroundTruth_LapNumNonNegative() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => (FieldInt(j, "lap_num") ?? -1) < 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents have lap_num < 0."); + } + + [SkippableFact] + public void Loki_GroundTruth_DriverNameNonEmpty() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => string.IsNullOrEmpty(Field(j, "driver_name"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents missing driver_name."); + } + + [SkippableFact] + public void Loki_GroundTruth_CarNumberNonEmpty() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => string.IsNullOrEmpty(Field(j, "car_number"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents missing car_number."); + } + + [SkippableFact] + public void Loki_GroundTruth_CustIdNonEmpty() + { + SkipIfDisabled(); + var bad = GetT0().Where(j => string.IsNullOrEmpty(Field(j, "cust_id"))).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} ground truth incidents missing cust_id."); + } + + // ── T1 — Speed Sweep ───────────────────────────────────────────────────── + + [SkippableFact] + public void Loki_SpeedSweep_HasFourSamples() + { + SkipIfDisabled(); + Assert.Equal(4, QueryLines(DataCaptureSuiteConstants.EventSpeedSample).Count); + } + + [SkippableFact] + public void Loki_SpeedSweep_CoversAllFourSpeeds() + { + SkipIfDisabled(); + var speeds = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + 
.Select(j => FieldInt(j, "requested_speed")) + .OrderBy(x => x).ToList(); + Assert.Equal(new int?[] { 1, 4, 8, 16 }, speeds); + } + + [SkippableFact] + public void Loki_SpeedSweep_NoDuplicateSpeeds() + { + SkipIfDisabled(); + var speeds = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .Select(j => Field(j, "requested_speed")).ToList(); + Assert.Equal(speeds.Count, speeds.Distinct().Count()); + } + + [SkippableFact] + public void Loki_SpeedSweep_EffectiveHzMatchesFormula() + { + SkipIfDisabled(); + foreach (var j in QueryLines(DataCaptureSuiteConstants.EventSpeedSample)) + { + var speed = FieldInt(j, "requested_speed") ?? 0; + var hz = FieldDouble(j, "effective_session_hz") ?? -1; + var expected = 60.0 / speed; + Assert.True(Math.Abs(hz - expected) < 0.01, + $"speed={speed}: expected hz={expected} but got {hz}"); + } + } + + [SkippableFact] + public void Loki_SpeedSweep_EffectiveHzDecreasesWithSpeed() + { + SkipIfDisabled(); + var samples = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .OrderBy(j => FieldInt(j, "requested_speed") ?? 0).ToList(); + Skip.If(samples.Count < 4, "Need 4 speed samples."); + for (int i = 0; i < samples.Count - 1; i++) + { + var hz1 = FieldDouble(samples[i], "effective_session_hz") ?? 0; + var hz2 = FieldDouble(samples[i + 1], "effective_session_hz") ?? 0; + Assert.True(hz1 > hz2, + $"Hz should decrease with speed; got hz[{i}]={hz1} >= hz[{i+1}]={hz2}"); + } + } + + [SkippableFact] + public void Loki_SpeedSweep_DetectionRateInRange() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventSpeedSample).Where(j => + { + var r = FieldDouble(j, "detection_rate_pct") ?? 
-1; + return r < 0 || r > 100; + }).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} speed samples have detection_rate_pct out of [0,100]."); + } + + [SkippableFact] + public void Loki_SpeedSweep_HitPlusMissEqualsThree() + { + SkipIfDisabled(); + foreach (var j in QueryLines(DataCaptureSuiteConstants.EventSpeedSample)) + { + var hit = FieldInt(j, "ground_truth_hit_count") ?? -1; + var miss = FieldInt(j, "ground_truth_miss_count") ?? -1; + Assert.Equal(3, hit + miss); + } + } + + [SkippableFact] + public void Loki_SpeedSweep_TickCountPositive() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .Where(j => (FieldInt(j, "tick_count") ?? 0) <= 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} speed samples have tick_count <= 0."); + } + + [SkippableFact] + public void Loki_SpeedSweep_IncidentsDetectedNonNegative() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .Where(j => (FieldInt(j, "incidents_detected") ?? -1) < 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} speed samples have incidents_detected < 0."); + } + + [SkippableFact] + public void Loki_SpeedSweep_1x_AllHitsDetected() + { + SkipIfDisabled(); + var sample1x = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .FirstOrDefault(j => FieldInt(j, "requested_speed") == 1); + Skip.If(sample1x == null, "No speed=1 sample found."); + Assert.Equal(3, FieldInt(sample1x, "ground_truth_hit_count") ?? -1); + } + + [SkippableFact] + public void Loki_SpeedSweep_1x_DetectionRate100Pct() + { + SkipIfDisabled(); + var sample1x = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .FirstOrDefault(j => FieldInt(j, "requested_speed") == 1); + Skip.If(sample1x == null, "No speed=1 sample found."); + var rate = FieldDouble(sample1x, "detection_rate_pct") ?? 
-1; + Assert.True(Math.Abs(rate - 100.0) < 0.01, $"1x detection_rate_pct expected 100; got {rate}"); + } + + [SkippableFact] + public void Loki_SpeedSweep_1x_MissCountIsZero() + { + SkipIfDisabled(); + var sample1x = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .FirstOrDefault(j => FieldInt(j, "requested_speed") == 1); + Skip.If(sample1x == null, "No speed=1 sample found."); + Assert.Equal(0, FieldInt(sample1x, "ground_truth_miss_count") ?? -1); + } + + [SkippableFact] + public void Loki_SpeedSweep_TickCountDecreasesWithSpeed() + { + SkipIfDisabled(); + var samples = QueryLines(DataCaptureSuiteConstants.EventSpeedSample) + .OrderBy(j => FieldInt(j, "requested_speed") ?? 0).ToList(); + Skip.If(samples.Count < 4, "Need 4 speed samples."); + for (int i = 0; i < samples.Count - 1; i++) + { + var tc1 = FieldInt(samples[i], "tick_count") ?? 0; + var tc2 = FieldInt(samples[i + 1], "tick_count") ?? 0; + Assert.True(tc1 > tc2, + $"tick_count should decrease with speed; got tick_count[{i}]={tc1} <= tick_count[{i+1}]={tc2}"); + } + } + + [SkippableFact] + public void Loki_SpeedSweep_AllSpeedsMatchPlanConstants() + { + SkipIfDisabled(); + var expected = new HashSet(DataCaptureSuiteConstants.SpeedSweepSpeeds); + foreach (var j in QueryLines(DataCaptureSuiteConstants.EventSpeedSample)) + { + var s = FieldInt(j, "requested_speed") ?? 
-1; + Assert.True(expected.Contains(s), + $"requested_speed={s} is not in DataCaptureSuiteConstants.SpeedSweepSpeeds."); + } + } + + // ── T2 — Variable Inventory ────────────────────────────────────────────── + + [SkippableFact] + public void Loki_VariableInventory_IsPresent() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventVariableInventory).Count >= 1, + "sdk_capture_variable_inventory not found in Loki."); + } + + [SkippableFact] + public void Loki_VariableInventory_CountAboveThreshold() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventVariableInventory); + Skip.If(lines.Count == 0, "No variable inventory event."); + Assert.True((FieldInt(lines[0], "variable_count") ?? 0) > 50, + $"variable_count expected > 50; got {Field(lines[0], "variable_count")}"); + } + + [SkippableFact] + public void Loki_VariableInventory_KnownVarsPresent() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventVariableInventory); + Skip.If(lines.Count == 0, "No variable inventory event."); + var names = FieldArray(lines[0], "variable_names"); + foreach (var expected in new[] { "Speed", "RPM", "ReplayFrameNum" }) + Assert.True(names.Contains(expected), + $"Expected SDK variable '{expected}' in variable_names; got: {string.Join(", ", names.Take(20))}"); + } + + // ── T3 — Player Snapshot ───────────────────────────────────────────────── + + [SkippableFact] + public void Loki_PlayerSnapshot_IsPresent() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventPlayerSnapshot).Count >= 1, + "sdk_capture_player_snapshot not found in Loki."); + } + + [SkippableFact] + public void Loki_PlayerSnapshot_HasSpeedMps() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventPlayerSnapshot); + Skip.If(lines.Count == 0, "No player snapshot event."); + Assert.NotNull(FieldDouble(lines[0], "speed_mps")); + } + + [SkippableFact] + public void 
Loki_PlayerSnapshot_SpeedMpsNonNegative() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventPlayerSnapshot); + Skip.If(lines.Count == 0, "No player snapshot event."); + Assert.True((FieldDouble(lines[0], "speed_mps") ?? -1) >= 0, + "speed_mps must be >= 0."); + } + + [SkippableFact] + public void Loki_PlayerSnapshot_HasGear() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventPlayerSnapshot); + Skip.If(lines.Count == 0, "No player snapshot event."); + Assert.NotNull(FieldInt(lines[0], "gear")); + } + + [SkippableFact] + public void Loki_PlayerSnapshot_HasRpm() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventPlayerSnapshot); + Skip.If(lines.Count == 0, "No player snapshot event."); + Assert.NotNull(FieldDouble(lines[0], "rpm")); + } + + [SkippableFact] + public void Loki_PlayerSnapshot_LapDistPctInRange() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventPlayerSnapshot); + Skip.If(lines.Count == 0, "No player snapshot event."); + var v = FieldDouble(lines[0], "lap_dist_pct") ?? -1; + Assert.InRange(v, 0.0, 1.0); + } + + // ── T4 — Driver Roster ─────────────────────────────────────────────────── + + [SkippableFact] + public void Loki_DriverRoster_IsPresent() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventDriverRoster).Count >= 1, + "sdk_capture_driver_roster not found in Loki."); + } + + [SkippableFact] + public void Loki_DriverRoster_DriverCountAtLeastTwo() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventDriverRoster); + Skip.If(lines.Count == 0, "No driver roster event."); + Assert.True((FieldInt(lines[0], "driver_count") ?? 
0) >= 2, + $"driver_count expected >= 2; got {Field(lines[0], "driver_count")}"); + } + + [SkippableFact] + public void Loki_DriverRoster_ContainsT0CarIdx0() + { + SkipIfDisabled(); + var roster = QueryLines(DataCaptureSuiteConstants.EventDriverRoster); + Skip.If(roster.Count == 0 || T0ByIndex(0) == null, "Need roster and T0[0]."); + var carIdxes = FieldArray(roster[0], "car_idxes"); + Assert.Contains(Field(T0ByIndex(0), "car_idx"), carIdxes); + } + + [SkippableFact] + public void Loki_DriverRoster_ContainsT0CarIdx1() + { + SkipIfDisabled(); + var roster = QueryLines(DataCaptureSuiteConstants.EventDriverRoster); + Skip.If(roster.Count == 0 || T0ByIndex(1) == null, "Need roster and T0[1]."); + Assert.Contains(Field(T0ByIndex(1), "car_idx"), FieldArray(roster[0], "car_idxes")); + } + + [SkippableFact] + public void Loki_DriverRoster_ContainsT0CarIdx2() + { + SkipIfDisabled(); + var roster = QueryLines(DataCaptureSuiteConstants.EventDriverRoster); + Skip.If(roster.Count == 0 || T0ByIndex(2) == null, "Need roster and T0[2]."); + Assert.Contains(Field(T0ByIndex(2), "car_idx"), FieldArray(roster[0], "car_idxes")); + } + + // ── T5 — Camera Switch ─────────────────────────────────────────────────── + + [SkippableFact] + public void Loki_CameraSwitch_IsPresent() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventCameraSwitchDriver).Count >= 1, + "sdk_capture_camera_switch_driver not found in Loki."); + } + + [SkippableFact] + public void Loki_CameraSwitch_CamCarIdxIsValid() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraSwitchDriver); + Skip.If(lines.Count == 0, "No camera switch event."); + Assert.True((FieldInt(lines[0], "cam_car_idx") ?? 
-1) >= 0, + "cam_car_idx must be >= 0."); + } + + [SkippableFact] + public void Loki_CameraSwitch_ConfirmedMatchIsTrue() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraSwitchDriver); + Skip.If(lines.Count == 0, "No camera switch event."); + Assert.Equal("true", Field(lines[0], "confirmed_match")); + } + + [SkippableFact] + public void Loki_CameraSwitch_CarIdxMatchesT0Index0() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraSwitchDriver); + Skip.If(lines.Count == 0 || T0ByIndex(0) == null, "Need camera switch and T0[0]."); + Assert.Equal(Field(T0ByIndex(0), "car_idx"), Field(lines[0], "cam_car_idx")); + } + + [SkippableFact] + public void Loki_CameraSwitch_GroundTruthIncidentIndexIsZero() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraSwitchDriver); + Skip.If(lines.Count == 0, "No camera switch event."); + Assert.Equal("0", Field(lines[0], "ground_truth_incident_index")); + } + + // ── T5b — Camera View Cycle ────────────────────────────────────────────── + + [SkippableFact] + public void Loki_CameraViewSamples_Present() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventCameraViewSample).Count >= 1, + "sdk_capture_camera_view_sample not found in Loki."); + } + + [SkippableFact] + public void Loki_CameraViewSamples_GroupNamesVary() + { + SkipIfDisabled(); + var names = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample) + .Select(j => Field(j, "cam_group_name")).Distinct().ToList(); + Assert.True(names.Count >= 2, + $"Expected >= 2 distinct cam_group_name values; got {names.Count}."); + } + + [SkippableFact] + public void Loki_CameraViewSamples_NoDuplicateGroupNums() + { + SkipIfDisabled(); + var nums = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample) + .Select(j => Field(j, "cam_group_num")).ToList(); + Assert.Equal(nums.Count, nums.Distinct().Count()); + } + + [SkippableFact] + public void 
Loki_CameraViewSamples_AllHaveGroundTruthIndex0() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample) + .Where(j => Field(j, "ground_truth_incident_index") != "0").ToList(); + Assert.True(bad.Count == 0, + $"{bad.Count} camera view samples have ground_truth_incident_index != 0."); + } + + [SkippableFact] + public void Loki_CameraViewSamples_CamCarIdxConsistentAcrossSamples() + { + SkipIfDisabled(); + var ids = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample) + .Select(j => Field(j, "cam_car_idx")).Distinct().ToList(); + Assert.True(ids.Count == 1, + $"Expected consistent cam_car_idx across samples; got {ids.Count} distinct values."); + } + + [SkippableFact] + public void Loki_CameraViewSamples_CamCarIdxMatchesT0() + { + SkipIfDisabled(); + var samples = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample); + Skip.If(samples.Count == 0 || T0ByIndex(0) == null, "Need camera view samples and T0[0]."); + Assert.Equal(Field(T0ByIndex(0), "car_idx"), Field(samples[0], "cam_car_idx")); + } + + [SkippableFact] + public void Loki_CameraViewSamples_HaveSpeedMpsField() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample) + .Where(j => Field(j, "speed_mps") == null).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} camera view samples missing speed_mps."); + } + + [SkippableFact] + public void Loki_CameraViewSamples_HaveGearField() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample) + .Where(j => Field(j, "gear") == null).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} camera view samples missing gear."); + } + + [SkippableFact] + public void Loki_CameraViewSummary_IsPresent() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventCameraViewSummary).Count >= 1, + "sdk_capture_camera_view_summary not found in Loki."); + } + + [SkippableFact] + public void 
Loki_CameraViewSummary_GroupsTestedPositive() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraViewSummary); + Skip.If(lines.Count == 0, "No camera view summary."); + Assert.True((FieldInt(lines[0], "groups_tested") ?? 0) > 0, + "groups_tested must be > 0."); + } + + [SkippableFact] + public void Loki_CameraViewSummary_ConfirmedMatchesPositive() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraViewSummary); + Skip.If(lines.Count == 0, "No camera view summary."); + Assert.True((FieldInt(lines[0], "confirmed_matches") ?? 0) > 0, + "confirmed_matches must be > 0."); + } + + [SkippableFact] + public void Loki_CameraViewSummary_GroupsTestedMatchesSampleCount() + { + SkipIfDisabled(); + var summary = QueryLines(DataCaptureSuiteConstants.EventCameraViewSummary); + var samples = QueryLines(DataCaptureSuiteConstants.EventCameraViewSample); + Skip.If(summary.Count == 0, "No camera view summary."); + Assert.Equal(samples.Count, FieldInt(summary[0], "groups_tested") ?? 
-1); + } + + [SkippableFact] + public void Loki_CameraViewSummary_GroupNamesFieldPresent() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventCameraViewSummary); + Skip.If(lines.Count == 0, "No camera view summary."); + Assert.True(FieldArray(lines[0], "group_names").Count > 0, + "group_names field should be non-empty."); + } + + // ── T6 — Session Results ───────────────────────────────────────────────── + + [SkippableFact] + public void Loki_SessionResults_IsPresent() + { + SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventSessionResults).Count >= 1, + "sdk_capture_session_results not found in Loki."); + } + + [SkippableFact] + public void Loki_SessionResults_ContainsT0CarIdx0() + { + SkipIfDisabled(); + var results = QueryLines(DataCaptureSuiteConstants.EventSessionResults); + Skip.If(results.Count == 0 || T0ByIndex(0) == null, "Need session results and T0[0]."); + var carIdxes = FieldArray(results[0], "car_idxes") + .Concat(FieldArray(results[0], "driver_car_idxes")).ToList(); + Assert.Contains(Field(T0ByIndex(0), "car_idx"), carIdxes); + } + + [SkippableFact] + public void Loki_SessionResults_ContainsT0CarIdx1() + { + SkipIfDisabled(); + var results = QueryLines(DataCaptureSuiteConstants.EventSessionResults); + Skip.If(results.Count == 0 || T0ByIndex(1) == null, "Need session results and T0[1]."); + var carIdxes = FieldArray(results[0], "car_idxes") + .Concat(FieldArray(results[0], "driver_car_idxes")).ToList(); + Assert.Contains(Field(T0ByIndex(1), "car_idx"), carIdxes); + } + + [SkippableFact] + public void Loki_SessionResults_ContainsT0CarIdx2() + { + SkipIfDisabled(); + var results = QueryLines(DataCaptureSuiteConstants.EventSessionResults); + Skip.If(results.Count == 0 || T0ByIndex(2) == null, "Need session results and T0[2]."); + var carIdxes = FieldArray(results[0], "car_idxes") + .Concat(FieldArray(results[0], "driver_car_idxes")).ToList(); + Assert.Contains(Field(T0ByIndex(2), "car_idx"), carIdxes); 
+ } + + // ── T7 — Incident Re-Seek ──────────────────────────────────────────────── + + [SkippableFact] + public void Loki_IncidentReseek_HasThreeEvents() + { + SkipIfDisabled(); + Assert.Equal(3, QueryLines(DataCaptureSuiteConstants.EventIncidentReseek).Count); + } + + [SkippableFact] + public void Loki_IncidentReseek_IncidentIndexesAreSequential() + { + SkipIfDisabled(); + var indexes = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .Select(j => FieldInt(j, "incident_index")).OrderBy(x => x).ToList(); + Assert.Equal(new int?[] { 0, 1, 2 }, indexes); + } + + [SkippableFact] + public void Loki_IncidentReseek_ReplayFramesPositive() + { + SkipIfDisabled(); + var bad = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .Where(j => (FieldInt(j, "replay_frame") ?? -1) <= 0).ToList(); + Assert.True(bad.Count == 0, $"{bad.Count} reseek events have replay_frame <= 0."); + } + + [SkippableFact] + public void Loki_IncidentReseek_FrameWithinToleranceOfT0_Index0() + { + SkipIfDisabled(); + var reseek = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .FirstOrDefault(j => FieldInt(j, "incident_index") == 0); + Skip.If(reseek == null || T0ByIndex(0) == null, "Need reseek[0] and T0[0]."); + var diff = Math.Abs((FieldInt(reseek, "replay_frame") ?? 0) - (FieldInt(T0ByIndex(0), "replay_frame") ?? 0)); + Assert.True(diff <= FrameTolerance, + $"Reseek[0] frame diff {diff} exceeds tolerance {FrameTolerance}."); + } + + [SkippableFact] + public void Loki_IncidentReseek_FrameWithinToleranceOfT0_Index1() + { + SkipIfDisabled(); + var reseek = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .FirstOrDefault(j => FieldInt(j, "incident_index") == 1); + Skip.If(reseek == null || T0ByIndex(1) == null, "Need reseek[1] and T0[1]."); + var diff = Math.Abs((FieldInt(reseek, "replay_frame") ?? 0) - (FieldInt(T0ByIndex(1), "replay_frame") ?? 
0)); + Assert.True(diff <= FrameTolerance, + $"Reseek[1] frame diff {diff} exceeds tolerance {FrameTolerance}."); + } + + [SkippableFact] + public void Loki_IncidentReseek_FrameWithinToleranceOfT0_Index2() + { + SkipIfDisabled(); + var reseek = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .FirstOrDefault(j => FieldInt(j, "incident_index") == 2); + Skip.If(reseek == null || T0ByIndex(2) == null, "Need reseek[2] and T0[2]."); + var diff = Math.Abs((FieldInt(reseek, "replay_frame") ?? 0) - (FieldInt(T0ByIndex(2), "replay_frame") ?? 0)); + Assert.True(diff <= FrameTolerance, + $"Reseek[2] frame diff {diff} exceeds tolerance {FrameTolerance}."); + } + + [SkippableFact] + public void Loki_IncidentReseek_CarIdxMatchesT0_Index0() + { + SkipIfDisabled(); + var reseek = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .FirstOrDefault(j => FieldInt(j, "incident_index") == 0); + Skip.If(reseek == null || T0ByIndex(0) == null, "Need reseek[0] and T0[0]."); + Assert.Equal(Field(T0ByIndex(0), "car_idx"), Field(reseek, "car_idx")); + } + + [SkippableFact] + public void Loki_IncidentReseek_CarIdxMatchesT0_Index1() + { + SkipIfDisabled(); + var reseek = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .FirstOrDefault(j => FieldInt(j, "incident_index") == 1); + Skip.If(reseek == null || T0ByIndex(1) == null, "Need reseek[1] and T0[1]."); + Assert.Equal(Field(T0ByIndex(1), "car_idx"), Field(reseek, "car_idx")); + } + + [SkippableFact] + public void Loki_IncidentReseek_CarIdxMatchesT0_Index2() + { + SkipIfDisabled(); + var reseek = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .FirstOrDefault(j => FieldInt(j, "incident_index") == 2); + Skip.If(reseek == null || T0ByIndex(2) == null, "Need reseek[2] and T0[2]."); + Assert.Equal(Field(T0ByIndex(2), "car_idx"), Field(reseek, "car_idx")); + } + + // ── T8 — FF Sweep ──────────────────────────────────────────────────────── + + [SkippableFact] + public void Loki_FfSweep_IsPresent() + { + 
SkipIfDisabled(); + Assert.True(QueryLines(DataCaptureSuiteConstants.EventFfSweepResult).Count >= 1, + "sdk_capture_ff_sweep_result not found in Loki."); + } + + [SkippableFact] + public void Loki_FfSweep_IncidentsFoundPositive() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventFfSweepResult); + Skip.If(lines.Count == 0, "No FF sweep event."); + Assert.True((FieldInt(lines[0], "incidents_found_count") ?? 0) > 0, + "incidents_found_count must be > 0."); + } + + [SkippableFact] + public void Loki_FfSweep_ContainsT0CarIdx0() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventFfSweepResult); + Skip.If(lines.Count == 0 || T0ByIndex(0) == null, "Need FF sweep and T0[0]."); + Assert.Contains(Field(T0ByIndex(0), "car_idx"), FieldArray(lines[0], "detected_car_idxes")); + } + + [SkippableFact] + public void Loki_FfSweep_ContainsT0CarIdx1() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventFfSweepResult); + Skip.If(lines.Count == 0 || T0ByIndex(1) == null, "Need FF sweep and T0[1]."); + Assert.Contains(Field(T0ByIndex(1), "car_idx"), FieldArray(lines[0], "detected_car_idxes")); + } + + [SkippableFact] + public void Loki_FfSweep_ContainsT0CarIdx2() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventFfSweepResult); + Skip.If(lines.Count == 0 || T0ByIndex(2) == null, "Need FF sweep and T0[2]."); + Assert.Contains(Field(T0ByIndex(2), "car_idx"), FieldArray(lines[0], "detected_car_idxes")); + } + + [SkippableFact] + public void Loki_FfSweep_BuildCompletedSuccessfully() + { + SkipIfDisabled(); + var lines = QueryLines(DataCaptureSuiteConstants.EventFfSweepResult); + Skip.If(lines.Count == 0, "No FF sweep event."); + // Accept either build_status="complete" or success="true" + var status = Field(lines[0], "build_status"); + var success = Field(lines[0], "success"); + Assert.True( + string.Equals(status, "complete", StringComparison.OrdinalIgnoreCase) || + 
string.Equals(success, "true", StringComparison.OrdinalIgnoreCase), + $"FF sweep not marked complete: build_status={status}, success={success}"); + } + + // ── Cross-Test Consistency ─────────────────────────────────────────────── + + [SkippableFact] + public void Loki_CrossTest_T0AndT7AllThreeFramesWithinTolerance() + { + SkipIfDisabled(); + var reseeks = QueryLines(DataCaptureSuiteConstants.EventIncidentReseek) + .ToDictionary(j => FieldInt(j, "incident_index") ?? -1, j => j); + Skip.If(GetT0().Count < 3 || reseeks.Count < 3, "Need 3 T0 and 3 reseek events."); + for (int i = 0; i < 3; i++) + { + if (!reseeks.TryGetValue(i, out var r) || T0ByIndex(i) == null) continue; + var diff = Math.Abs((FieldInt(r, "replay_frame") ?? 0) - (FieldInt(T0ByIndex(i), "replay_frame") ?? 0)); + Assert.True(diff <= FrameTolerance, + $"Cross-test: incident {i} reseek frame diff {diff} > tolerance {FrameTolerance}."); + } + } + + [SkippableFact] + public void Loki_CrossTest_T5CamCarIdxMatchesT0Index0() + { + SkipIfDisabled(); + var camSwitch = QueryLines(DataCaptureSuiteConstants.EventCameraSwitchDriver); + Skip.If(camSwitch.Count == 0 || T0ByIndex(0) == null, "Need T5 and T0[0]."); + Assert.Equal(Field(T0ByIndex(0), "car_idx"), Field(camSwitch[0], "cam_car_idx")); + } + + [SkippableFact] + public void Loki_CrossTest_T8ContainsAllT0Cars() + { + SkipIfDisabled(); + var sweep = QueryLines(DataCaptureSuiteConstants.EventFfSweepResult); + Skip.If(sweep.Count == 0 || GetT0().Count < 3, "Need T8 and T0."); + var detected = FieldArray(sweep[0], "detected_car_idxes"); + for (int i = 0; i < 3; i++) + { + var t0 = T0ByIndex(i); + Skip.If(t0 == null, $"No T0[{i}]."); + Assert.Contains(Field(t0, "car_idx"), detected); + } + } + + [SkippableFact] + public void Loki_CrossTest_SpeedSweepCountMatchesPlanSpeeds() + { + SkipIfDisabled(); + Assert.Equal( + DataCaptureSuiteConstants.SpeedSweepSpeeds.Length, + QueryLines(DataCaptureSuiteConstants.EventSpeedSample).Count); + } + + [SkippableFact] + public 
void Loki_CrossTest_T4RosterContainsAllT0Cars() + { + SkipIfDisabled(); + var roster = QueryLines(DataCaptureSuiteConstants.EventDriverRoster); + Skip.If(roster.Count == 0 || GetT0().Count < 3, "Need T4 and T0."); + var carIdxes = FieldArray(roster[0], "car_idxes"); + for (int i = 0; i < 3; i++) + { + var t0 = T0ByIndex(i); + Skip.If(t0 == null, $"No T0[{i}]."); + Assert.Contains(Field(t0, "car_idx"), carIdxes); + } + } + } +} diff --git a/src/SimSteward.Plugin.Tests/DataCaptureSuiteTests.cs b/src/SimSteward.Plugin.Tests/DataCaptureSuiteTests.cs new file mode 100644 index 0000000..9744fce --- /dev/null +++ b/src/SimSteward.Plugin.Tests/DataCaptureSuiteTests.cs @@ -0,0 +1,810 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Xunit; + +namespace SimSteward.Plugin.Tests +{ + // ───────────────────────────────────────────────────────────────────────── + // Constants + // ───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuiteConstantsTests + { + // ── SpeedSweepSpeeds ──────────────────────────────────────────────── + + [Fact] public void SpeedSweepSpeeds_HasFourEntries() => + Assert.Equal(4, DataCaptureSuiteConstants.SpeedSweepSpeeds.Length); + + [Fact] public void SpeedSweepSpeeds_Contains_1_4_8_16() => + Assert.Equal(new[] { 1, 4, 8, 16 }, DataCaptureSuiteConstants.SpeedSweepSpeeds); + + [Fact] public void SpeedSweepSpeeds_AreInAscendingOrder() + { + var speeds = DataCaptureSuiteConstants.SpeedSweepSpeeds; + for (int i = 1; i < speeds.Length; i++) + Assert.True(speeds[i] > speeds[i - 1], $"Speed[{i}]={speeds[i]} not > Speed[{i-1}]={speeds[i-1]}"); + } + + [Fact] public void SpeedSweepSpeeds_AllPositive() => + Assert.All(DataCaptureSuiteConstants.SpeedSweepSpeeds, s => Assert.True(s > 0)); + + // ── Timing constants ──────────────────────────────────────────────── + + [Fact] public void 
LokiVerifyDelayMs_Is15000() => + Assert.Equal(15_000, DataCaptureSuiteConstants.LokiVerifyDelayMs); + + [Fact] public void NextIncidentCooldownTicks_Is150() => + Assert.Equal(150, DataCaptureSuiteConstants.NextIncidentCooldownTicks); + + [Fact] public void CamSettleTicks_Is60() => + Assert.Equal(60, DataCaptureSuiteConstants.CamSettleTicks); + + [Fact] public void FrameZeroStableTicks_Is4() => + Assert.Equal(4, DataCaptureSuiteConstants.FrameZeroStableTicks); + + [Fact] public void SeekTimeoutTicks_Is600() => + Assert.Equal(600, DataCaptureSuiteConstants.SeekTimeoutTicks); + + [Fact] public void SpeedSweepAdvanceFrames_Is300() => + Assert.Equal(300, DataCaptureSuiteConstants.SpeedSweepAdvanceFrames); + + // ── IncidentFlagMask matches the SDK flag constants ───────────────── + + [Fact] public void IncidentFlagMask_IncludesFurledFlag() => + Assert.NotEqual(0, DataCaptureSuiteConstants.IncidentFlagMask & ReplayIncidentIndexDetection.FurledSessionFlag); + + [Fact] public void IncidentFlagMask_IncludesRepairFlag() => + Assert.NotEqual(0, DataCaptureSuiteConstants.IncidentFlagMask & ReplayIncidentIndexDetection.RepairSessionFlag); + + [Fact] public void IncidentFlagMask_DoesNotMatchArbitraryBit() => + Assert.Equal(0, DataCaptureSuiteConstants.IncidentFlagMask & 0x01); + + // ── Event name catalogue ───────────────────────────────────────────── + + private static IEnumerable<string> AllEventNames() + { + yield return DataCaptureSuiteConstants.EventGroundTruth; + yield return DataCaptureSuiteConstants.EventSpeedSample; + yield return DataCaptureSuiteConstants.EventVariableInventory; + yield return DataCaptureSuiteConstants.EventPlayerSnapshot; + yield return DataCaptureSuiteConstants.EventDriverRoster; + yield return DataCaptureSuiteConstants.EventCameraSwitchDriver; + yield return DataCaptureSuiteConstants.EventCameraViewSample; + yield return DataCaptureSuiteConstants.EventCameraViewSummary; + yield return DataCaptureSuiteConstants.EventSessionResults; + yield return 
DataCaptureSuiteConstants.EventIncidentReseek; + yield return DataCaptureSuiteConstants.EventFfSweepResult; + yield return DataCaptureSuiteConstants.EventSuiteStarted; + yield return DataCaptureSuiteConstants.EventSuiteComplete; + yield return DataCaptureSuiteConstants.EventDataDiscovery; + yield return DataCaptureSuiteConstants.Event60HzSummary; + yield return DataCaptureSuiteConstants.EventPreflightCheck; + yield return DataCaptureSuiteConstants.EventPreflightProbe; + } + + [Fact] public void AllEventNames_AreNonEmpty() => + Assert.All(AllEventNames(), n => Assert.False(string.IsNullOrWhiteSpace(n))); + + [Fact] public void AllEventNames_AreUnique() + { + var names = AllEventNames().ToList(); + var distinct = names.Distinct().ToList(); + Assert.Equal(names.Count, distinct.Count); + } + + [Fact] public void AllSdkCaptureEventNames_StartWithSdkCapture() + { + var suiteEvents = AllEventNames() + .Where(n => n != DataCaptureSuiteConstants.EventSuiteStarted && + n != DataCaptureSuiteConstants.EventSuiteComplete); + Assert.All(suiteEvents, n => Assert.StartsWith("sdk_capture_", n)); + } + + [Fact] public void AllEventNames_ContainOnlyLowercaseUnderscoreDigitChars() + { + Assert.All(AllEventNames(), name => + { + foreach (char c in name) + Assert.True(char.IsLower(c) || c == '_' || char.IsDigit(c), + $"Event name '{name}' contains unexpected char '{c}'"); + }); + } + + [Fact] public void EventCount_Is17() + { + Assert.Equal(17, AllEventNames().Count()); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // Speed-sweep effective-Hz formula + // ───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuiteSpeedSweepTests + { + private static double EffectiveHz(int speed) => 60.0 / speed; + + [Theory] + [InlineData(1, 60.0)] + [InlineData(4, 15.0)] + [InlineData(8, 7.5)] + [InlineData(16, 3.75)] + public void EffectiveHz_MatchesFormula(int speed, double expected) => + 
Assert.Equal(expected, EffectiveHz(speed), precision: 4); + + [Fact] public void EffectiveHz_1x_IsHighestSamplingRate() => + Assert.True(EffectiveHz(1) > EffectiveHz(16)); + + [Fact] public void EffectiveHz_16x_IsLowestSamplingRate() => + Assert.Equal(3.75, EffectiveHz(DataCaptureSuiteConstants.SpeedSweepSpeeds.Last()), precision: 4); + + [Fact] public void SpeedSweepSpeeds_AllProducePositiveHz() => + Assert.All(DataCaptureSuiteConstants.SpeedSweepSpeeds, s => Assert.True(EffectiveHz(s) > 0)); + + // Detection rate formula: hits / 3 * 100 + [Theory] + [InlineData(3, 3, 100.0)] + [InlineData(2, 3, 66.6667)] + [InlineData(1, 3, 33.3333)] + [InlineData(0, 3, 0.0)] + public void DetectionRate_Formula(int hits, int total, double expected) + { + double rate = total > 0 ? hits * 100.0 / total : 0.0; + Assert.Equal(expected, rate, precision: 3); + } + + [Fact] public void DetectionRate_ZeroGtCount_IsZero() + { + double rate = 0 > 0 ? 1 * 100.0 / 0 : 0.0; + Assert.Equal(0.0, rate); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // Frame tolerance (T7 re-seek validation: ±60 frames is a match) + // ───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuiteFrameToleranceTests + { + private static bool FrameMatch(int reseek, int groundTruth) => + Math.Abs(reseek - groundTruth) <= 60; + + [Fact] public void ExactMatch_IsTrue() => + Assert.True(FrameMatch(1000, 1000)); + + [Fact] public void Within60_IsTrue() => + Assert.True(FrameMatch(1040, 1000)); + + [Fact] public void AtBoundary60_IsTrue() => + Assert.True(FrameMatch(1060, 1000)); + + [Fact] public void At61_IsFalse() => + Assert.False(FrameMatch(1061, 1000)); + + [Fact] public void LargePositiveDiff_IsFalse() => + Assert.False(FrameMatch(2000, 1000)); + + [Fact] public void NegativeDiffWithin60_IsTrue() => + Assert.True(FrameMatch(960, 1000)); + + [Fact] public void NegativeDiffAtBoundary60_IsTrue() => + Assert.True(FrameMatch(940, 
1000)); + + [Fact] public void NegativeDiffAt61_IsFalse() => + Assert.False(FrameMatch(939, 1000)); + + [Fact] public void ThreeReseeks_AllMatch_CountsThree() + { + var gt = new[] { 100, 500, 900 }; + var rs = new[] { 130, 490, 960 }; + int hits = gt.Zip(rs, (g, r) => FrameMatch(r, g) ? 1 : 0).Sum(); + Assert.Equal(3, hits); + } + + [Fact] public void ThreeReseeks_OneOutOfRange_CountsTwo() + { + var gt = new[] { 100, 500, 900 }; + var rs = new[] { 130, 700, 960 }; // 700 vs 500 = diff 200 + int hits = gt.Zip(rs, (g, r) => FrameMatch(r, g) ? 1 : 0).Sum(); + Assert.Equal(2, hits); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // GroundTruthIncident model + // ───────────────────────────────────────────────────────────────────────── + + public class GroundTruthIncidentTests + { + [Fact] public void DefaultInstance_HasZeroCarIdx() + { + var g = new GroundTruthIncident(); + Assert.Equal(0, g.CarIdx); + } + + [Fact] public void Properties_CanBeSetAndRead() + { + var g = new GroundTruthIncident + { + IncidentIndex = 1, + CarIdx = 42, + ReplayFrameNum = 1500, + ReplaySessionTimeSec = 123.456, + DriverName = "Test Driver", + CarNumber = "99", + CustId = "12345", + LapDistPct = 0.75f, + LapNum = 3 + }; + Assert.Equal(1, g.IncidentIndex); + Assert.Equal(42, g.CarIdx); + Assert.Equal(1500, g.ReplayFrameNum); + Assert.Equal(123.456, g.ReplaySessionTimeSec, precision: 3); + Assert.Equal("Test Driver", g.DriverName); + Assert.Equal("99", g.CarNumber); + Assert.Equal("12345", g.CustId); + Assert.Equal(0.75f, g.LapDistPct); + Assert.Equal(3, g.LapNum); + } + + [Fact] public void SessionFlagsSnapshot_CanBeAssigned() + { + var g = new GroundTruthIncident + { + CarIdxSessionFlagsSnapshot = new[] { 1, 2, 3 } + }; + Assert.Equal(3, g.CarIdxSessionFlagsSnapshot.Length); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // DataCaptureSuiteTestResult + // 
───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuiteTestResultTests + { + [Fact] public void DefaultStatus_IsPending() => + Assert.Equal("pending", new DataCaptureSuiteTestResult().Status); + + [Fact] public void Status_CanBeChanged() + { + var r = new DataCaptureSuiteTestResult { Status = "pass" }; + Assert.Equal("pass", r.Status); + } + + [Fact] public void JsonPropertyNames_AreCorrect() + { + var r = new DataCaptureSuiteTestResult + { + TestId = "T0", Name = "Ground Truth", Status = "emitted", + EventName = DataCaptureSuiteConstants.EventGroundTruth, + KpiLabel = "incidents_captured", KpiValue = "3", Error = null + }; + var j = JObject.Parse(JsonConvert.SerializeObject(r)); + Assert.Equal("T0", j["testId"]?.ToString()); + Assert.Equal("Ground Truth", j["name"]?.ToString()); + Assert.Equal("emitted", j["status"]?.ToString()); + Assert.Equal(DataCaptureSuiteConstants.EventGroundTruth, j["eventName"]?.ToString()); + Assert.Equal("incidents_captured", j["kpiLabel"]?.ToString()); + Assert.Equal("3", j["kpiValue"]?.ToString()); + } + + [Fact] public void NullError_IsOmittedFromJson() + { + var j = JObject.Parse(JsonConvert.SerializeObject(new DataCaptureSuiteTestResult { Error = null })); + Assert.Null(j["error"]?.ToString() is string s && s == "" ? 
null : j["error"]); + } + + [Theory] + [InlineData("pending")] + [InlineData("emitted")] + [InlineData("pass")] + [InlineData("fail")] + [InlineData("skip")] + public void KnownStatuses_CanBeSet(string status) + { + var r = new DataCaptureSuiteTestResult { Status = status }; + Assert.Equal(status, r.Status); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // DataCaptureSuiteSnapshot + // ───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuiteSnapshotTests + { + [Fact] public void DefaultPhase_IsIdle() => + Assert.Equal("idle", new DataCaptureSuiteSnapshot().Phase); + + [Fact] public void DefaultTotalSteps_Is10() => + Assert.Equal(10, new DataCaptureSuiteSnapshot().TotalSteps); + + [Fact] public void JsonPropertyNames_AreCorrect() + { + var snap = new DataCaptureSuiteSnapshot + { + Phase = "running", TestRunId = "abc-123", + CurrentStep = 3, TotalSteps = 10, CurrentStepName = "T1_Sweep", + ElapsedMs = 5000, GrafanaExploreUrl = "https://example.com/explore" + }; + var j = JObject.Parse(JsonConvert.SerializeObject(snap)); + Assert.Equal("running", j["phase"]?.ToString()); + Assert.Equal("abc-123", j["testRunId"]?.ToString()); + Assert.Equal(3, j["currentStep"]?.Value<int>()); + Assert.Equal(10, j["totalSteps"]?.Value<int>()); + Assert.Equal("T1_Sweep", j["currentStepName"]?.ToString()); + Assert.Equal(5000, j["elapsedMs"]?.Value<int>()); + Assert.Equal("https://example.com/explore", j["grafanaExploreUrl"]?.ToString()); + } + + [Fact] public void TestResults_DefaultsToNull() => + Assert.Null(new DataCaptureSuiteSnapshot().TestResults); + + [Fact] public void TestResults_CanBeAssigned() + { + var snap = new DataCaptureSuiteSnapshot + { + TestResults = new[] { new DataCaptureSuiteTestResult { TestId = "T0", Status = "pass" } } + }; + Assert.Single(snap.TestResults); + Assert.Equal("pass", snap.TestResults[0].Status); + } + } + + // 
───────────────────────────────────────────────────────────────────────── + // DataCaptureSuitePhase enum + // ───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuitePhaseTests + { + [Fact] public void Idle_IsFirst() => + Assert.Equal(0, (int)DataCaptureSuitePhase.Idle); + + [Fact] public void Enum_HasAllExpectedValues() + { + var names = Enum.GetNames(typeof(DataCaptureSuitePhase)); + Assert.Contains("Idle", names); + Assert.Contains("Running", names); + Assert.Contains("AwaitingLoki", names); + Assert.Contains("Complete", names); + Assert.Contains("Cancelled", names); + } + + [Fact] public void Enum_HasExactlyFiveValues() => + Assert.Equal(5, Enum.GetValues(typeof(DataCaptureSuitePhase)).Length); + + [Fact] public void ToString_Lowercase_MatchesPhaseFieldConvention() + { + Assert.Equal("idle", DataCaptureSuitePhase.Idle.ToString().ToLower()); + Assert.Equal("running", DataCaptureSuitePhase.Running.ToString().ToLower()); + Assert.Equal("awaitingloki", DataCaptureSuitePhase.AwaitingLoki.ToString().ToLower()); + Assert.Equal("complete", DataCaptureSuitePhase.Complete.ToString().ToLower()); + Assert.Equal("cancelled", DataCaptureSuitePhase.Cancelled.ToString().ToLower()); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // LokiQueryClient + // ───────────────────────────────────────────────────────────────────────── + + public class LokiQueryClientTests + { + private const string SampleRunId = "b1a2c3d4-e5f6-7890-abcd-ef1234567890"; + + // ── BuildTestRunQuery ─────────────────────────────────────────────── + + [Fact] public void BuildTestRunQuery_ContainsTestRunId() => + Assert.Contains(SampleRunId, LokiQueryClient.BuildTestRunQuery(SampleRunId)); + + [Fact] public void BuildTestRunQuery_ContainsAppLabel() => + Assert.Contains("app=\"sim-steward\"", LokiQueryClient.BuildTestRunQuery(SampleRunId)); + + [Fact] public void BuildTestRunQuery_WithoutEvent_NoEventFilter() + { + 
var q = LokiQueryClient.BuildTestRunQuery(SampleRunId); + Assert.DoesNotContain("|event=", q); + } + + [Fact] public void BuildTestRunQuery_WithEvent_ContainsEventFilter() + { + var q = LokiQueryClient.BuildTestRunQuery(SampleRunId, DataCaptureSuiteConstants.EventGroundTruth); + Assert.Contains("|event=", q); + Assert.Contains(DataCaptureSuiteConstants.EventGroundTruth, q); + } + + [Fact] public void BuildTestRunQuery_WithEvent_AlsoContainsTestRunId() + { + var q = LokiQueryClient.BuildTestRunQuery(SampleRunId, DataCaptureSuiteConstants.EventGroundTruth); + Assert.Contains(SampleRunId, q); + } + + [Fact] public void BuildTestRunQuery_WithNullEvent_NoEventFilter() + { + var q = LokiQueryClient.BuildTestRunQuery(SampleRunId, null); + Assert.DoesNotContain("|event=", q); + } + + [Fact] public void BuildTestRunQuery_WithEmptyEvent_NoEventFilter() + { + var q = LokiQueryClient.BuildTestRunQuery(SampleRunId, ""); + Assert.DoesNotContain("|event=", q); + } + + [Fact] public void BuildTestRunQuery_StartsWithStreamSelector() + { + var q = LokiQueryClient.BuildTestRunQuery(SampleRunId); + Assert.StartsWith("{", q); + } + + [Theory] + [InlineData(DataCaptureSuiteConstants.EventGroundTruth)] + [InlineData(DataCaptureSuiteConstants.EventSpeedSample)] + [InlineData(DataCaptureSuiteConstants.EventVariableInventory)] + [InlineData(DataCaptureSuiteConstants.EventPlayerSnapshot)] + [InlineData(DataCaptureSuiteConstants.EventDriverRoster)] + [InlineData(DataCaptureSuiteConstants.EventCameraSwitchDriver)] + [InlineData(DataCaptureSuiteConstants.EventCameraViewSample)] + [InlineData(DataCaptureSuiteConstants.EventSessionResults)] + [InlineData(DataCaptureSuiteConstants.EventIncidentReseek)] + [InlineData(DataCaptureSuiteConstants.EventFfSweepResult)] + public void BuildTestRunQuery_AllEventNames_ProduceNonEmptyQuery(string eventName) + { + var q = LokiQueryClient.BuildTestRunQuery(SampleRunId, eventName); + Assert.False(string.IsNullOrWhiteSpace(q)); + Assert.Contains(eventName, q); + 
} + + // ── BuildGrafanaExploreUrl ────────────────────────────────────────── + + [Fact] public void BuildGrafanaExploreUrl_EmptyBase_ReturnsEmpty() => + Assert.Equal("", LokiQueryClient.BuildGrafanaExploreUrl("", SampleRunId)); + + [Fact] public void BuildGrafanaExploreUrl_NullBase_ReturnsEmpty() => + Assert.Equal("", LokiQueryClient.BuildGrafanaExploreUrl(null, SampleRunId)); + + [Fact] public void BuildGrafanaExploreUrl_EmptyRunId_ReturnsEmpty() => + Assert.Equal("", LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", "")); + + [Fact] public void BuildGrafanaExploreUrl_NullRunId_ReturnsEmpty() => + Assert.Equal("", LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", null)); + + [Fact] public void BuildGrafanaExploreUrl_ContainsExplore() => + Assert.Contains("explore", LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId)); + + [Fact] public void BuildGrafanaExploreUrl_ContainsEncodedTestRunId() + { + var url = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId); + // test_run_id appears in the URL (may be percent-encoded) + Assert.Contains(SampleRunId, Uri.UnescapeDataString(url)); + } + + [Fact] public void BuildGrafanaExploreUrl_StartsWithBase() + { + var url = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId); + Assert.StartsWith("https://example.grafana.net/", url); + } + + [Fact] public void BuildGrafanaExploreUrl_TrailingSlashOnBase_IsStripped() + { + var withSlash = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net/", SampleRunId); + var withoutSlash = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId); + Assert.Equal(withoutSlash, withSlash); + } + + // ── BuildGrafanaExploreUrl (3-arg: per-event) ───────────────────── + + [Fact] public void BuildGrafanaExploreUrl_WithEvent_ContainsEventName() + { + var url = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", 
SampleRunId, DataCaptureSuiteConstants.EventGroundTruth); + Assert.Contains(DataCaptureSuiteConstants.EventGroundTruth, Uri.UnescapeDataString(url)); + } + + [Fact] public void BuildGrafanaExploreUrl_WithEvent_ContainsRunId() + { + var url = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId, DataCaptureSuiteConstants.EventGroundTruth); + Assert.Contains(SampleRunId, Uri.UnescapeDataString(url)); + } + + [Fact] public void BuildGrafanaExploreUrl_WithEvent_EmptyBase_ReturnsEmpty() => + Assert.Equal("", LokiQueryClient.BuildGrafanaExploreUrl("", SampleRunId, "some_event")); + + [Fact] public void BuildGrafanaExploreUrl_WithEvent_NullEvent_StillReturnsUrl() + { + var url = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId, null); + Assert.Contains("explore", url); + Assert.Contains(SampleRunId, Uri.UnescapeDataString(url)); + } + + [Fact] public void BuildGrafanaExploreUrl_WithEvent_StartsWithBase() + { + var url = LokiQueryClient.BuildGrafanaExploreUrl("https://example.grafana.net", SampleRunId, "test_event"); + Assert.StartsWith("https://example.grafana.net/", url); + } + + // ── Timestamp helpers ─────────────────────────────────────────────── + + [Fact] public void NowNs_ReturnsPositiveValue() => + Assert.True(LokiQueryClient.NowNs() > 0); + + [Fact] public void NowNs_IsInNanoseconds() + { + // 2020-01-01 in ns is ~1577836800000000000 + Assert.True(LokiQueryClient.NowNs() > 1_577_836_800_000_000_000L); + } + + [Fact] public void NowMinusMs_ZeroOffset_ApproximatelyEqualsNowNs() + { + long t1 = LokiQueryClient.NowMinusMs(0); + long t2 = LokiQueryClient.NowNs(); + // Should be within 100ms of each other + Assert.InRange(t2 - t1, 0L, 100_000_000L); + } + + [Fact] public void NowMinusMs_OneHour_IsLessThanNowNs() + { + long oneHourAgo = LokiQueryClient.NowMinusMs(3_600_000L); + long now = LokiQueryClient.NowNs(); + Assert.True(oneHourAgo < now); + } + + [Fact] public void 
NowMinusMs_OneHour_DiffIsApproximatelyOneHour() + { + long oneHourAgo = LokiQueryClient.NowMinusMs(3_600_000L); + long now = LokiQueryClient.NowNs(); + long diffMs = (now - oneHourAgo) / 1_000_000L; + // Allow ±100ms tolerance + Assert.InRange(diffMs, 3_599_900L, 3_600_100L); + } + + [Fact] public void NowMinusMs_LargeOffset_StillPositive() + { + // 1 day ago should still be a positive epoch + Assert.True(LokiQueryClient.NowMinusMs(86_400_000L) > 0); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // Cross-cutting: event names align with result structure expectations + // ───────────────────────────────────────────────────────────────────────── + + public class DataCaptureSuiteIntegrationModelTests + { + /// + /// Each of the 12 test steps (T0–T8 + T5b + T_DISC + T_60Hz) has a unique event name constant. + /// This mirrors the expected test result rows in the dashboard. + /// + [Fact] public void TwelveUniquePerTestEventNames() + { + var perTestEvents = new[] + { + DataCaptureSuiteConstants.EventGroundTruth, + DataCaptureSuiteConstants.EventSpeedSample, + DataCaptureSuiteConstants.EventVariableInventory, + DataCaptureSuiteConstants.EventPlayerSnapshot, + DataCaptureSuiteConstants.EventDriverRoster, + DataCaptureSuiteConstants.EventCameraSwitchDriver, + DataCaptureSuiteConstants.EventCameraViewSample, + DataCaptureSuiteConstants.EventSessionResults, + DataCaptureSuiteConstants.EventIncidentReseek, + DataCaptureSuiteConstants.EventFfSweepResult, + DataCaptureSuiteConstants.EventDataDiscovery, + DataCaptureSuiteConstants.Event60HzSummary, + }; + Assert.Equal(12, perTestEvents.Length); + Assert.Equal(12, perTestEvents.Distinct().Count()); + } + + [Fact] public void SuiteLifecycleEvents_AreDistinctFromTestEvents() + { + var perTest = new HashSet + { + DataCaptureSuiteConstants.EventGroundTruth, + DataCaptureSuiteConstants.EventSpeedSample, + DataCaptureSuiteConstants.EventVariableInventory, + 
DataCaptureSuiteConstants.EventPlayerSnapshot, + DataCaptureSuiteConstants.EventDriverRoster, + DataCaptureSuiteConstants.EventCameraSwitchDriver, + DataCaptureSuiteConstants.EventCameraViewSample, + DataCaptureSuiteConstants.EventCameraViewSummary, + DataCaptureSuiteConstants.EventSessionResults, + DataCaptureSuiteConstants.EventIncidentReseek, + DataCaptureSuiteConstants.EventFfSweepResult, + }; + Assert.DoesNotContain(DataCaptureSuiteConstants.EventSuiteStarted, perTest); + Assert.DoesNotContain(DataCaptureSuiteConstants.EventSuiteComplete, perTest); + } + + /// + /// LokiVerifyDelayMs must be large enough for Alloy file-tail ingestion (~10-15s). + /// + [Fact] public void LokiVerifyDelayMs_AtLeast10Seconds() => + Assert.True(DataCaptureSuiteConstants.LokiVerifyDelayMs >= 10_000); + + /// + /// NextIncidentCooldownTicks at 60Hz = 2.5s, which is the known SDK seek cooldown. + /// + [Fact] public void NextIncidentCooldownTicks_At60Hz_Is2_5Seconds() + { + double seconds = DataCaptureSuiteConstants.NextIncidentCooldownTicks / 60.0; + Assert.Equal(2.5, seconds, precision: 2); + } + + /// + /// CamSettleTicks at 60Hz = 1.0s, which matches the CamSwitchPos settle time. + /// + [Fact] public void CamSettleTicks_At60Hz_Is1Second() + { + double seconds = DataCaptureSuiteConstants.CamSettleTicks / 60.0; + Assert.Equal(1.0, seconds, precision: 2); + } + + /// + /// SeekTimeoutTicks at 60Hz = 10s, which is enough time for a ToStart seek. + /// + [Fact] public void SeekTimeoutTicks_At60Hz_IsAtLeast5Seconds() + { + double seconds = DataCaptureSuiteConstants.SeekTimeoutTicks / 60.0; + Assert.True(seconds >= 5.0); + } + + /// + /// The speed sweep covers all four speeds from the plan spec. 
+ /// + [Fact] public void SpeedSweepCoversAllPlanSpeeds() + { + var required = new[] { 1, 4, 8, 16 }; + Assert.All(required, s => Assert.Contains(s, DataCaptureSuiteConstants.SpeedSweepSpeeds)); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // T0 SelectGroundTruthFrames + // ───────────────────────────────────────────────────────────────────────── + + public class SelectGroundTruthFramesTests + { + [Fact] public void PrefersDifferentLaps_SkippingLap1() + { + var candidates = new List<(int frame, int lap, int carIdx)> + { + (100, 1, 0), // lap 1 — should be skipped + (200, 2, 1), + (300, 3, 2), + (400, 4, 3), + }; + var selected = DataCaptureSuiteSelection.SelectGroundTruthFrames(candidates); + Assert.Equal(3, selected.Length); + Assert.Equal(new[] { 200, 300, 400 }, selected); + } + + [Fact] public void FallsBackToLap1_WhenNotEnoughOtherLaps() + { + var candidates = new List<(int frame, int lap, int carIdx)> + { + (100, 1, 0), + (200, 1, 1), + (300, 2, 2), + }; + var selected = DataCaptureSuiteSelection.SelectGroundTruthFrames(candidates); + Assert.Equal(3, selected.Length); + Assert.Contains(300, selected); // lap 2 preferred + Assert.Contains(100, selected); // lap 1 used as fallback + } + + [Fact] public void ReturnsFewerThan3_WhenNotEnoughCandidates() + { + var candidates = new List<(int frame, int lap, int carIdx)> + { + (100, 3, 0), + (200, 5, 1), + }; + var selected = DataCaptureSuiteSelection.SelectGroundTruthFrames(candidates); + Assert.Equal(2, selected.Length); + } + + [Fact] public void ReturnsEmpty_WhenNoCandidates() + { + var selected = DataCaptureSuiteSelection.SelectGroundTruthFrames(new List<(int, int, int)>()); + Assert.Empty(selected); + } + + [Fact] public void PrefersDifferentLaps_NotSameLapTwice() + { + var candidates = new List<(int frame, int lap, int carIdx)> + { + (100, 3, 0), + (200, 3, 1), // same lap as first — should be skipped in pass 1 + (300, 5, 2), + (400, 7, 3), + }; + var selected = 
DataCaptureSuiteSelection.SelectGroundTruthFrames(candidates); + Assert.Equal(3, selected.Length); + Assert.Equal(new[] { 100, 300, 400 }, selected); + } + } + + // ───────────────────────────────────────────────────────────────────────── + // Preflight model + // ───────────────────────────────────────────────────────────────────────── + + public class PreflightMiniTestTests + { + [Fact] + public void DefaultStatus_IsPending() + { + var t = new PreflightMiniTest { Id = "PC_WS", Name = "WebSocket", Level = 1 }; + Assert.Equal("pending", t.Status); + } + + [Fact] + public void SerializesToJson_WithExpectedKeys() + { + var t = new PreflightMiniTest { Id = "PC_WS", Name = "WS", Status = "pass", Detail = "ok", Level = 1 }; + var json = JObject.FromObject(t); + Assert.Equal("PC_WS", json["id"]?.ToString()); + Assert.Equal("pass", json["status"]?.ToString()); + Assert.Equal(1, json["level"]?.Value()); + } + } + + public class PreflightSnapshotTests + { + [Fact] + public void DefaultValues() + { + var snap = new PreflightSnapshot(); + Assert.Equal("idle", snap.Phase); + Assert.Equal("full", snap.ReplayScope); + Assert.Equal(0, snap.Level); + Assert.False(snap.AllPassed); + Assert.Null(snap.MiniTests); + Assert.Null(snap.CorrelationId); + } + + [Fact] + public void AllPassed_Serializes() + { + var snap = new PreflightSnapshot + { + Phase = "complete", + Level = 2, + AllPassed = true, + CorrelationId = "abc-123", + ReplayScope = "partial", + MiniTests = new[] + { + new PreflightMiniTest { Id = "PC_WS", Name = "WS", Status = "pass", Level = 1 }, + new PreflightMiniTest { Id = "PC_CHECKERED", Name = "Checkered", Status = "skip", Level = 2 }, + } + }; + var json = JObject.FromObject(snap); + Assert.True(json["allPassed"]?.Value()); + Assert.Equal("partial", json["replayScope"]?.ToString()); + Assert.Equal("abc-123", json["correlationId"]?.ToString()); + Assert.Equal(2, (json["miniTests"] as JArray)?.Count); + } + + [Fact] + public void 
BackwardCompat_FlatBooleans_StillSerialize() + { + var snap = new PreflightSnapshot + { + GrafanaOk = true, + SimHubOk = true, + CheckeredOk = false, + ResultsPopulated = true, + SessionStateAtEnd = 5, + }; + var json = JObject.FromObject(snap); + Assert.True(json["grafanaOk"]?.Value()); + Assert.True(json["simHubOk"]?.Value()); + Assert.False(json["checkeredOk"]?.Value()); + Assert.Equal(5, json["sessionStateAtEnd"]?.Value()); + } + } +} diff --git a/src/SimSteward.Plugin.Tests/PluginVersionInfoTests.cs b/src/SimSteward.Plugin.Tests/PluginVersionInfoTests.cs new file mode 100644 index 0000000..e527666 --- /dev/null +++ b/src/SimSteward.Plugin.Tests/PluginVersionInfoTests.cs @@ -0,0 +1,13 @@ +using Xunit; + +namespace SimSteward.Plugin.Tests +{ + public class PluginVersionInfoTests + { + [Fact] + public void Display_IsNonEmpty() + { + Assert.False(string.IsNullOrWhiteSpace(PluginVersionInfo.Display)); + } + } +} diff --git a/src/SimSteward.Plugin/DashboardBridge.cs b/src/SimSteward.Plugin/DashboardBridge.cs index bde47aa..6e63795 100644 --- a/src/SimSteward.Plugin/DashboardBridge.cs +++ b/src/SimSteward.Plugin/DashboardBridge.cs @@ -3,6 +3,7 @@ using Fleck; using Newtonsoft.Json; using Newtonsoft.Json.Linq; +using Sentry; namespace SimSteward.Plugin { @@ -72,6 +73,7 @@ public void Start(string bindAddress, int port, string authToken) } catch (Exception ex) { + SentrySdk.CaptureException(ex); _logger?.Warn($"DashboardBridge: getStateForNewClient failed: {ex.Message}"); } try @@ -82,6 +84,7 @@ public void Start(string bindAddress, int port, string authToken) } catch (Exception ex) { + SentrySdk.CaptureException(ex); _logger?.Warn($"DashboardBridge: getLogTailForNewClient failed: {ex.Message}"); } }; @@ -106,6 +109,7 @@ public void Start(string bindAddress, int port, string authToken) } catch (Exception ex) { + SentrySdk.CaptureException(ex); _logger?.Error($"DashboardBridge: failed to start: {ex.Message}", ex); throw; } @@ -117,6 +121,7 @@ public void Stop() try { 
_server.Dispose(); } catch (Exception ex) { + SentrySdk.CaptureException(ex); _logger?.Warn($"DashboardBridge: dispose error: {ex.Message}"); } _server = null; diff --git a/src/SimSteward.Plugin/DataCaptureSuite.cs b/src/SimSteward.Plugin/DataCaptureSuite.cs new file mode 100644 index 0000000..b28d032 --- /dev/null +++ b/src/SimSteward.Plugin/DataCaptureSuite.cs @@ -0,0 +1,148 @@ +using System.Collections.Generic; +using System.Linq; +using Newtonsoft.Json; + +namespace SimSteward.Plugin +{ + public enum DataCaptureSuitePhase { Idle, Running, AwaitingLoki, Complete, Cancelled } + + /// A single ground-truth incident captured at 1x speed during T0. + public class GroundTruthIncident + { + public int IncidentIndex { get; set; } + public int CarIdx { get; set; } + public int ReplayFrameNum { get; set; } + public double ReplaySessionTimeSec { get; set; } + public int[] CarIdxSessionFlagsSnapshot { get; set; } + public string DriverName { get; set; } + public string CarNumber { get; set; } + public string CustId { get; set; } + public float LapDistPct { get; set; } + public int LapNum { get; set; } + } + + /// Per-test result for dashboard display and Loki verification. + public class DataCaptureSuiteTestResult + { + [JsonProperty("testId")] public string TestId { get; set; } + [JsonProperty("name")] public string Name { get; set; } + /// pending / emitted / found / pass / fail / skip + [JsonProperty("status")] public string Status { get; set; } = "pending"; + [JsonProperty("eventName")] public string EventName { get; set; } + [JsonProperty("kpiLabel")] public string KpiLabel { get; set; } + [JsonProperty("kpiValue")] public string KpiValue { get; set; } + [JsonProperty("error")] public string Error { get; set; } + [JsonProperty("lokiCount")] public int LokiCount { get; set; } + [JsonProperty("grafanaEventUrl")] public string GrafanaEventUrl { get; set; } + } + + /// Summary of a selected ground-truth incident for dashboard display. 
+ public class SelectedIncidentSummary + { + [JsonProperty("index")] public int Index { get; set; } + [JsonProperty("frame")] public int Frame { get; set; } + [JsonProperty("lap")] public int Lap { get; set; } + [JsonProperty("driverName")] public string DriverName { get; set; } + [JsonProperty("carNumber")] public string CarNumber { get; set; } + [JsonProperty("custId")] public string CustId { get; set; } + /// "different_lap", "first_available", or "fallback" + [JsonProperty("reason")] public string Reason { get; set; } + } + + /// Snapshot broadcast in state.dataCaptureSuite (WebSocket). + public class DataCaptureSuiteSnapshot + { + [JsonProperty("phase")] public string Phase { get; set; } = "idle"; + [JsonProperty("testRunId")] public string TestRunId { get; set; } + [JsonProperty("currentStep")] public int CurrentStep { get; set; } + [JsonProperty("totalSteps")] public int TotalSteps { get; set; } = 10; + [JsonProperty("currentStepName")] public string CurrentStepName { get; set; } + [JsonProperty("elapsedMs")] public long ElapsedMs { get; set; } + [JsonProperty("testResults")] public DataCaptureSuiteTestResult[] TestResults { get; set; } + [JsonProperty("grafanaExploreUrl")] public string GrafanaExploreUrl { get; set; } + [JsonProperty("selectedIncidents")] public SelectedIncidentSummary[] SelectedIncidents { get; set; } + } + + /// Constants shared between the state machine and unit tests. + public static class DataCaptureSuiteConstants + { + public const int LokiVerifyDelayMs = 15_000; + public static readonly int[] SpeedSweepSpeeds = { 1, 4, 8, 16 }; + /// Frames beyond the last GT incident to advance during T1 sweep. + public const int SpeedSweepAdvanceFrames = 300; + /// + /// Combined flag mask used to detect incident rising edges during the T1 speed sweep. + /// Matches | + /// . 
+ /// + public const int IncidentFlagMask = 0x80000 | 0x100000; // furled | repair + /// Ticks to wait after ReplaySearch(NextIncident) before reading frame/car data (~2.5 s at 60 Hz). + public const int NextIncidentCooldownTicks = 150; + /// Ticks to wait after CamSwitchPos before reading CamCarIdx (~1 s at 60 Hz). + public const int CamSettleTicks = 60; + /// Consecutive ticks with ReplayFrameNum ≤ 2 required to confirm frame-zero stability. + public const int FrameZeroStableTicks = 4; + /// Max ticks to wait for frame-zero before giving up. + public const int SeekTimeoutTicks = 600; + + // ── T0 scan/select constants ────────────────────────────────────────── + /// Max NextIncident calls during the T0 incident scan pass. + public const int T0_ScanMaxIncidents = 30; + /// Skip incidents on laps ≤ this when possible (avoid first-lap drama). + public const int T0_MinLapForSelection = 1; + /// Frame tolerance for seek-settle during T0 capture pass. + public const int T0_SeekSettleTolerance = 30; + + // ── Structured log event names ────────────────────────────────────────── + public const string EventGroundTruth = "sdk_capture_ground_truth_incident"; + public const string EventSpeedSample = "sdk_capture_speed_sample"; + public const string EventVariableInventory = "sdk_capture_variable_inventory"; + public const string EventPlayerSnapshot = "sdk_capture_player_snapshot"; + public const string EventDriverRoster = "sdk_capture_driver_roster"; + public const string EventCameraSwitchDriver = "sdk_capture_camera_switch_driver"; + public const string EventCameraViewSample = "sdk_capture_camera_view_sample"; + public const string EventCameraViewSummary = "sdk_capture_camera_view_summary"; + public const string EventSessionResults = "sdk_capture_session_results"; + public const string EventIncidentReseek = "sdk_capture_incident_reseek"; + public const string EventFfSweepResult = "sdk_capture_ff_sweep_result"; + public const string EventSuiteStarted = 
"sdk_capture_suite_started"; + public const string EventSuiteComplete = "sdk_capture_suite_complete"; + public const string EventDataDiscovery = "sdk_capture_data_discovery"; + public const string Event60HzSummary = "sdk_capture_60hz_summary"; + public const string EventPreflightCheck = "sdk_capture_preflight_check"; + public const string EventPreflightProbe = "sdk_capture_preflight_probe"; + } + + /// T0 incident selection algorithm — testable outside SIMHUB_SDK. + public static class DataCaptureSuiteSelection + { + /// + /// Selects up to 3 incident frames from candidates, preferring different laps and skipping lap 1. + /// + public static int[] SelectGroundTruthFrames(List<(int frame, int lap, int carIdx)> candidates) + { + var result = new List(); + var usedLaps = new HashSet(); + + // Pass 1: different laps, lap > T0_MinLapForSelection + foreach (var c in candidates.Where(c => c.lap > DataCaptureSuiteConstants.T0_MinLapForSelection).OrderBy(c => c.frame)) + { + if (!usedLaps.Contains(c.lap)) + { + result.Add(c.frame); + usedLaps.Add(c.lap); + if (result.Count == 3) return result.ToArray(); + } + } + + // Pass 2: fill from any remaining candidates + foreach (var c in candidates.OrderBy(c => c.frame)) + { + if (result.Count >= 3) break; + if (!result.Contains(c.frame)) result.Add(c.frame); + } + + return result.Take(3).ToArray(); + } + } +} diff --git a/src/SimSteward.Plugin/HighRateTelemetryRecorder.cs b/src/SimSteward.Plugin/HighRateTelemetryRecorder.cs new file mode 100644 index 0000000..1e50f68 --- /dev/null +++ b/src/SimSteward.Plugin/HighRateTelemetryRecorder.cs @@ -0,0 +1,176 @@ +#if SIMHUB_SDK +using System; +using System.IO; +using System.Text; +using IRSDKSharper; + +namespace SimSteward.Plugin +{ + /// + /// Feature-flagged 60 Hz telemetry recorder for testing only. + /// Writes one JSONL row per call with selected + /// telemetry variables from the iRacing SDK data catalog (Tier 1 + Tier 2). + /// Gated by env var SIMSTEWARD_60HZ_TEST_CAPTURE=1. 
+ /// + public sealed class HighRateTelemetryRecorder : IDisposable + { + private const int CarSlotCount = 64; + + private readonly StreamWriter _writer; + private readonly string _filePath; + private int _ticksRecorded; + private readonly DateTime _startUtc; + + public HighRateTelemetryRecorder(string testRunId, string basePath) + { + _startUtc = DateTime.UtcNow; + var dir = Path.Combine(basePath, "60hz"); + Directory.CreateDirectory(dir); + _filePath = Path.Combine(dir, $"sdk_60hz_capture_{testRunId}.jsonl"); + _writer = new StreamWriter(_filePath, append: false, encoding: new UTF8Encoding(false), bufferSize: 65536); + } + + public string FilePath => _filePath; + public int TicksRecorded => _ticksRecorded; + + /// Records one tick of telemetry. Called from DataUpdate (~60 Hz). + public void RecordTick(IRacingSdk irsdk) + { + if (irsdk?.Data == null) return; + try + { + var sb = new StringBuilder(2048); + sb.Append('{'); + + AppendInt(sb, "frame", GetInt(irsdk, "ReplayFrameNum")); + sb.Append(','); + AppendDouble(sb, "sessionTime", GetDouble(irsdk, "SessionTime")); + sb.Append(','); + AppendInt(sb, "sessionState", GetInt(irsdk, "SessionState")); + sb.Append(','); + AppendInt(sb, "sessionFlags", GetInt(irsdk, "SessionFlags")); + sb.Append(','); + AppendInt(sb, "camCarIdx", GetInt(irsdk, "CamCarIdx")); + + // Per-car arrays (Tier 1) + sb.Append(",\"carIdxTrackSurface\":"); + AppendIntArray(sb, irsdk, "CarIdxTrackSurface"); + sb.Append(",\"carIdxTrackSurfaceMaterial\":"); + AppendIntArray(sb, irsdk, "CarIdxTrackSurfaceMaterial"); + sb.Append(",\"carIdxPosition\":"); + AppendIntArray(sb, irsdk, "CarIdxPosition"); + sb.Append(",\"carIdxClassPosition\":"); + AppendIntArray(sb, irsdk, "CarIdxClassPosition"); + sb.Append(",\"carIdxLap\":"); + AppendIntArray(sb, irsdk, "CarIdxLap"); + sb.Append(",\"carIdxLapDistPct\":"); + AppendFloatArray(sb, irsdk, "CarIdxLapDistPct"); + sb.Append(",\"carIdxSessionFlags\":"); + AppendIntArray(sb, irsdk, "CarIdxSessionFlags"); + 
sb.Append(",\"carIdxOnPitRoad\":"); + AppendBoolArray(sb, irsdk, "CarIdxOnPitRoad"); + + // Focused-car telemetry (Tier 2) + sb.Append(','); + AppendFloat(sb, "latAccel", GetFloat(irsdk, "LatAccel")); + sb.Append(','); + AppendFloat(sb, "lonAccel", GetFloat(irsdk, "LonAccel")); + sb.Append(','); + AppendFloat(sb, "yawRate", GetFloat(irsdk, "YawRate")); + + sb.Append('}'); + _writer.WriteLine(sb.ToString()); + _ticksRecorded++; + } + catch { /* never throw on telemetry tick path */ } + } + + /// + /// Flushes and closes the file. Returns summary stats for the log event. + /// + public (int ticksRecorded, long fileSizeBytes, double durationSec) Finish() + { + try { _writer.Flush(); _writer.Close(); } + catch { /* best effort */ } + + long size = 0; + try { size = new FileInfo(_filePath).Length; } catch { } + double dur = (DateTime.UtcNow - _startUtc).TotalSeconds; + return (_ticksRecorded, size, dur); + } + + public void Dispose() + { + try { _writer?.Dispose(); } catch { } + } + + // ── Helpers ─────────────────────────────────────────────────────────── + + private static int GetInt(IRacingSdk sdk, string name) + { + try { return sdk.Data.GetInt(name); } catch { return 0; } + } + + private static double GetDouble(IRacingSdk sdk, string name) + { + try { return sdk.Data.GetDouble(name); } catch { return 0; } + } + + private static float GetFloat(IRacingSdk sdk, string name) + { + try { return sdk.Data.GetFloat(name); } catch { return 0f; } + } + + private static void AppendInt(StringBuilder sb, string key, int val) + { + sb.Append('"').Append(key).Append("\":").Append(val); + } + + private static void AppendDouble(StringBuilder sb, string key, double val) + { + sb.Append('"').Append(key).Append("\":").Append(val.ToString("F4")); + } + + private static void AppendFloat(StringBuilder sb, string key, float val) + { + sb.Append('"').Append(key).Append("\":").Append(val.ToString("F4")); + } + + private static void AppendIntArray(StringBuilder sb, IRacingSdk sdk, string 
name) + { + sb.Append('['); + for (int i = 0; i < CarSlotCount; i++) + { + if (i > 0) sb.Append(','); + try { sb.Append(sdk.Data.GetInt(name, i)); } + catch { sb.Append('0'); } + } + sb.Append(']'); + } + + private static void AppendFloatArray(StringBuilder sb, IRacingSdk sdk, string name) + { + sb.Append('['); + for (int i = 0; i < CarSlotCount; i++) + { + if (i > 0) sb.Append(','); + try { sb.Append(sdk.Data.GetFloat(name, i).ToString("F4")); } + catch { sb.Append("0.0"); } + } + sb.Append(']'); + } + + private static void AppendBoolArray(StringBuilder sb, IRacingSdk sdk, string name) + { + sb.Append('['); + for (int i = 0; i < CarSlotCount; i++) + { + if (i > 0) sb.Append(','); + try { sb.Append(sdk.Data.GetBool(name, i) ? "true" : "false"); } + catch { sb.Append("false"); } + } + sb.Append(']'); + } + } +} +#endif diff --git a/src/SimSteward.Plugin/LokiQueryClient.cs b/src/SimSteward.Plugin/LokiQueryClient.cs new file mode 100644 index 0000000..0b215c7 --- /dev/null +++ b/src/SimSteward.Plugin/LokiQueryClient.cs @@ -0,0 +1,168 @@ +using System; +using System.Collections.Generic; +using System.Net.Http; +using System.Net.Http.Headers; +using System.Text; +using System.Threading.Tasks; +using Newtonsoft.Json.Linq; + +namespace SimSteward.Plugin +{ + /// + /// Thin HTTP client for Loki /loki/api/v1/query_range used by the data-capture suite + /// to verify structured log events reached Loki within the expected window. + /// + public static class LokiQueryClient + { + private static readonly HttpClient LokiClient = new HttpClient + { + Timeout = TimeSpan.FromSeconds(5) + }; + + /// + /// Returns the number of log lines that match in the given + /// nanosecond time window. Returns -1 on any error/timeout. 
+ /// + public static async Task CountMatchingAsync( + string lokiReadUrl, + string logql, + long startNs, + long endNs, + string basicAuthUser = null, + string basicAuthPass = null) + { + if (string.IsNullOrEmpty(lokiReadUrl)) return -1; + try + { + var encoded = Uri.EscapeDataString(logql); + var url = $"{lokiReadUrl.TrimEnd('/')}/loki/api/v1/query_range" + + $"?query={encoded}&start={startNs}&end={endNs}&limit=1000&direction=forward"; + + using var req = new HttpRequestMessage(HttpMethod.Get, url); + if (!string.IsNullOrEmpty(basicAuthUser) && basicAuthPass != null) + { + var creds = Convert.ToBase64String(Encoding.UTF8.GetBytes(basicAuthUser + ":" + basicAuthPass)); + req.Headers.Authorization = new AuthenticationHeaderValue("Basic", creds); + } + + using var resp = await LokiClient.SendAsync(req).ConfigureAwait(false); + if (!resp.IsSuccessStatusCode) return -1; + + var body = await resp.Content.ReadAsStringAsync().ConfigureAwait(false); + var jo = JObject.Parse(body); + var results = jo["data"]?["result"] as JArray; + if (results == null) return 0; + + int total = 0; + foreach (var stream in results) + { + if (stream["values"] is JArray vals) total += vals.Count; + } + return total; + } + catch + { + return -1; + } + } + + /// + /// Builds a LogQL query that selects all events for a given , + /// optionally filtered to a single . + /// + public static string BuildTestRunQuery(string testRunId, string eventName = null) + { + var q = $"{{app=\"sim-steward\"}}|json|test_run_id=\"{testRunId}\""; + if (!string.IsNullOrEmpty(eventName)) + q += $"|event=\"{eventName}\""; + return q; + } + + /// + /// Builds a Grafana Explore deep-link URL filtered to a specific . + /// Returns empty string if either argument is missing. 
+ /// + public static string BuildGrafanaExploreUrl(string grafanaBaseUrl, string testRunId) + { + return BuildGrafanaExploreUrl(grafanaBaseUrl, testRunId, null); + } + + /// + /// Builds a Grafana Explore deep-link URL filtered to a specific and optional . + /// Uses Grafana 9+ object format for the left parameter. Returns empty string if base URL or run ID is missing. + /// + public static string BuildGrafanaExploreUrl(string grafanaBaseUrl, string testRunId, string eventName) + { + if (string.IsNullOrEmpty(grafanaBaseUrl) || string.IsNullOrEmpty(testRunId)) return ""; + var logql = $"{{app=\"sim-steward\"}} |json |test_run_id=\"{testRunId}\""; + if (!string.IsNullOrEmpty(eventName)) + logql += $" |event=\"{eventName}\""; + var exprJson = Newtonsoft.Json.JsonConvert.SerializeObject(logql); + var left = $"{{\"datasource\":\"loki_local\",\"queries\":[{{\"refId\":\"A\",\"expr\":{exprJson},\"queryType\":\"range\"}}],\"range\":{{\"from\":\"now-1h\",\"to\":\"now\"}}}}"; + return $"{grafanaBaseUrl.TrimEnd('/')}/explore?orgId=1&left={Uri.EscapeDataString(left)}"; + } + + /// + /// Queries Loki and returns parsed JSON log line objects. Returns empty list on error. 
+ /// + public static async Task> QueryLinesAsync( + string lokiReadUrl, + string logql, + long startNs, + long endNs, + string basicAuthUser = null, + string basicAuthPass = null) + { + var lines = new List(); + if (string.IsNullOrEmpty(lokiReadUrl)) return lines; + try + { + var encoded = Uri.EscapeDataString(logql); + var url = $"{lokiReadUrl.TrimEnd('/')}/loki/api/v1/query_range" + + $"?query={encoded}&start={startNs}&end={endNs}&limit=1000&direction=forward"; + + using var req = new HttpRequestMessage(HttpMethod.Get, url); + if (!string.IsNullOrEmpty(basicAuthUser) && basicAuthPass != null) + { + var creds = Convert.ToBase64String(Encoding.UTF8.GetBytes(basicAuthUser + ":" + basicAuthPass)); + req.Headers.Authorization = new AuthenticationHeaderValue("Basic", creds); + } + + using var resp = await LokiClient.SendAsync(req).ConfigureAwait(false); + if (!resp.IsSuccessStatusCode) return lines; + + var body = await resp.Content.ReadAsStringAsync().ConfigureAwait(false); + var jo = JObject.Parse(body); + var results = jo["data"]?["result"] as JArray; + if (results == null) return lines; + + foreach (var stream in results) + { + if (!(stream["values"] is JArray vals)) continue; + foreach (var v in vals) + { + if (v is JArray pair && pair.Count >= 2) + { + var line = pair[1]?.ToString(); + if (!string.IsNullOrEmpty(line)) + { + try { lines.Add(JObject.Parse(line)); } + catch { /* skip malformed lines */ } + } + } + } + } + } + catch { /* return empty list on any error */ } + return lines; + } + + /// Returns nanoseconds since UNIX epoch for UtcNow. + public static long NowNs() => + DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() * 1_000_000L; + + /// Returns nanoseconds since UNIX epoch for UtcNow - offsetMs. 
+ public static long NowMinusMs(long offsetMs) => + (DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() - offsetMs) * 1_000_000L; + } +} diff --git a/src/SimSteward.Plugin/PluginControl.xaml b/src/SimSteward.Plugin/PluginControl.xaml index e2aa47e..c1970dc 100644 --- a/src/SimSteward.Plugin/PluginControl.xaml +++ b/src/SimSteward.Plugin/PluginControl.xaml @@ -31,6 +31,7 @@ + Dependency status and WebSocket bridge. Dashboard: http://localhost:8888/Web/sim-steward-dash/index.html — WS: ws://localhost:19847 diff --git a/src/SimSteward.Plugin/PluginControl.xaml.cs b/src/SimSteward.Plugin/PluginControl.xaml.cs index 982a1bc..8d8867c 100644 --- a/src/SimSteward.Plugin/PluginControl.xaml.cs +++ b/src/SimSteward.Plugin/PluginControl.xaml.cs @@ -21,7 +21,12 @@ public PluginControl(SimStewardPlugin plugin) Interval = new System.TimeSpan(0, 0, 1) }; _refreshTimer.Tick += RefreshStatus; - Loaded += (s, e) => { _refreshTimer.Start(); RefreshStatus(s, e); }; + Loaded += (s, e) => + { + PluginVersionText.Text = "Version: " + PluginVersionInfo.Display; + _refreshTimer.Start(); + RefreshStatus(s, e); + }; Unloaded += (s, e) => _refreshTimer.Stop(); } diff --git a/src/SimSteward.Plugin/PluginMetricsTelemetry.cs b/src/SimSteward.Plugin/PluginMetricsTelemetry.cs new file mode 100644 index 0000000..db3e612 --- /dev/null +++ b/src/SimSteward.Plugin/PluginMetricsTelemetry.cs @@ -0,0 +1,152 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.Metrics; +using System.Reflection; +using OpenTelemetry; +using OpenTelemetry.Metrics; +using OpenTelemetry.Resources; + +namespace SimSteward.Plugin +{ + /// + /// Optional OTLP metrics export to a local OpenTelemetry Collector (→ Prometheus). + /// Enabled when OTEL_EXPORTER_OTLP_ENDPOINT or SIMSTEWARD_OTLP_ENDPOINT is set (see docs/observability-local.md). + /// Low-cardinality tags only; host/process samples mirror at the same cadence as structured logs. 
+ /// + public sealed class PluginMetricsTelemetry : IDisposable + { + private readonly MeterProvider _meterProvider; + private readonly Meter _meter; + private readonly Func _getSample; + private readonly KeyValuePair[] _baseTags; + + private PluginMetricsTelemetry( + MeterProvider meterProvider, + Meter meter, + Func getSample, + KeyValuePair[] baseTags) + { + _meterProvider = meterProvider; + _meter = meter; + _getSample = getSample; + _baseTags = baseTags; + _meter.CreateObservableGauge( + "simsteward.plugin.ready", + ObserveReady, + unit: "1", + description: "1 while the SimSteward plugin is loaded."); + _meter.CreateObservableGauge( + "simsteward.process.cpu.percent", + ObserveCpu, + unit: "%", + description: "SimHub process CPU over the last resource sample interval."); + _meter.CreateObservableGauge( + "simsteward.process.working_set_mb", + ObserveWs, + unit: "MiBy", + description: "SimHub process working set."); + } + + /// Returns null if OTLP is not configured (no endpoint env vars). + public static PluginMetricsTelemetry TryCreate( + PluginLogger logger, + Func getSample) + { + var endpoint = FirstNonEmpty( + Environment.GetEnvironmentVariable("OTEL_EXPORTER_OTLP_ENDPOINT"), + Environment.GetEnvironmentVariable("SIMSTEWARD_OTLP_ENDPOINT")); + if (string.IsNullOrWhiteSpace(endpoint)) + return null; + + var trimmed = endpoint.Trim(); + if (!Uri.TryCreate(trimmed, UriKind.Absolute, out var uri)) + { + logger?.Structured("WARN", "simhub-plugin", "otel_metrics_bad_endpoint", + "OTLP endpoint is not a valid URI; OTLP metrics disabled.", + new Dictionary { ["endpoint"] = trimmed }, "lifecycle", null); + return null; + } + + var env = Environment.GetEnvironmentVariable("SIMSTEWARD_LOG_ENV"); + if (string.IsNullOrWhiteSpace(env)) + env = "unknown"; + + var ver = Assembly.GetExecutingAssembly().GetName().Version?.ToString() ?? 
"0.0.0"; + var baseTags = new[] + { + new KeyValuePair("deployment.environment", env), + }; + + var resource = ResourceBuilder.CreateDefault() + .AddService("sim-steward-plugin", serviceVersion: ver) + .AddAttributes(new Dictionary + { + ["deployment.environment"] = env, + }); + + var meter = new Meter("SimSteward.Plugin", ver); + + var provider = Sdk.CreateMeterProviderBuilder() + .SetResourceBuilder(resource) + .AddMeter("SimSteward.Plugin") + .AddOtlpExporter(o => + { + o.Endpoint = uri; + }) + .Build(); + + var telemetry = new PluginMetricsTelemetry(provider, meter, getSample, baseTags); + + logger?.Structured("INFO", "simhub-plugin", "otel_metrics_started", + "OTLP metrics export enabled (OpenTelemetry → collector).", + new Dictionary + { + ["endpoint"] = uri.GetLeftPart(UriPartial.Authority), + ["deployment_environment"] = env, + }, "lifecycle", null); + + return telemetry; + } + + private static string FirstNonEmpty(params string[] values) + { + foreach (var v in values) + { + if (!string.IsNullOrWhiteSpace(v)) + return v; + } + return null; + } + + private IEnumerable> ObserveReady() + { + yield return new Measurement(1, _baseTags); + } + + private IEnumerable> ObserveCpu() + { + var s = _getSample(); + var v = s?.ProcessCpuPct ?? 0; + yield return new Measurement(v, _baseTags); + } + + private IEnumerable> ObserveWs() + { + var s = _getSample(); + var v = s?.ProcessWorkingSetMb ?? 0; + yield return new Measurement(v, _baseTags); + } + + public void Dispose() + { + try + { + _meterProvider?.Dispose(); + } + catch + { + // ignore + } + } + } +} diff --git a/src/SimSteward.Plugin/PluginState.cs b/src/SimSteward.Plugin/PluginState.cs index 6464e0a..1ddbd8d 100644 --- a/src/SimSteward.Plugin/PluginState.cs +++ b/src/SimSteward.Plugin/PluginState.cs @@ -54,11 +54,23 @@ public class PluginDiagnostics [JsonProperty("diskFreeGb")] public double DiskFreeGb { get; set; } + + /// True when SIMSTEWARD_LOKI_URL env var is configured (Grafana/Loki reachable). 
+ [JsonProperty("grafanaConfigured")] + public bool GrafanaConfigured { get; set; } + + /// True if any race session in the loaded replay has reached Checkered or CoolDown state. + [JsonProperty("replaySessionCompleted")] + public bool ReplaySessionCompleted { get; set; } } /// Minimal snapshot for WebSocket state push. public class PluginSnapshot { + /// Plugin build id (semver + git from AssemblyInformationalVersion). + [JsonProperty("pluginVersion")] + public string PluginVersion { get; set; } = ""; + [JsonProperty("pluginMode")] public string PluginMode { get; set; } = "Unknown"; @@ -98,6 +110,83 @@ public class PluginSnapshot /// M6 dashboard: replay incident index build status and last TR-019 index for current subsession. [JsonProperty("replayIncidentIndex")] public ReplayIncidentIndexDashboardSnapshot ReplayIncidentIndex { get; set; } + + /// Data capture suite state (test harness). + [JsonProperty("dataCaptureSuite")] + public DataCaptureSuiteSnapshot DataCaptureSuite { get; set; } + + /// Active preflight check result (seek-to-end, Grafana ping, etc.). + [JsonProperty("preflight")] + public PreflightSnapshot Preflight { get; set; } + } + + /// A single mini-test within the preflight check. + public class PreflightMiniTest + { + [JsonProperty("id")] public string Id { get; set; } + [JsonProperty("name")] public string Name { get; set; } + [JsonProperty("status")] public string Status { get; set; } = "pending"; // pending, running, pass, fail, skip + [JsonProperty("detail")] public string Detail { get; set; } + [JsonProperty("level")] public int Level { get; set; } + } + + /// Session info extracted from replay YAML during preflight. 
+ public class PreflightSessionInfo + { + [JsonProperty("sessionNum")] public int SessionNum { get; set; } + [JsonProperty("sessionName")] public string SessionName { get; set; } + [JsonProperty("sessionType")] public string SessionType { get; set; } + [JsonProperty("sessionState")] public string SessionState { get; set; } + [JsonProperty("resultsOfficial")] public bool ResultsOfficial { get; set; } + } + + /// Result of the active preflight check triggered by the "Pre-test conditions" button. + public class PreflightSnapshot + { + [JsonProperty("phase")] + public string Phase { get; set; } = "idle"; + + [JsonProperty("level")] + public int Level { get; set; } + + [JsonProperty("replayScope")] + public string ReplayScope { get; set; } = "full"; + + [JsonProperty("correlationId")] + public string CorrelationId { get; set; } + + [JsonProperty("allPassed")] + public bool AllPassed { get; set; } + + [JsonProperty("miniTests")] + public PreflightMiniTest[] MiniTests { get; set; } + + // Legacy flat fields — kept for backward compat + [JsonProperty("grafanaOk")] + public bool GrafanaOk { get; set; } + + [JsonProperty("simHubOk")] + public bool SimHubOk { get; set; } + + [JsonProperty("checkeredOk")] + public bool CheckeredOk { get; set; } + + [JsonProperty("sessionStateAtEnd")] + public int SessionStateAtEnd { get; set; } + + [JsonProperty("resultsPopulated")] + public bool ResultsPopulated { get; set; } + + [JsonProperty("error")] + public string Error { get; set; } + + /// Sessions found in the replay YAML (populated during L1 check). + [JsonProperty("sessions")] + public PreflightSessionInfo[] Sessions { get; set; } + + /// Total replay frame count at time of preflight. + [JsonProperty("replayFrameTotal")] + public int ReplayFrameTotal { get; set; } } /// WebSocket state.replayIncidentIndex (TR-031–TR-033, TR-037–TR-038). 
diff --git a/src/SimSteward.Plugin/PluginVersionInfo.cs b/src/SimSteward.Plugin/PluginVersionInfo.cs new file mode 100644 index 0000000..f7d4e13 --- /dev/null +++ b/src/SimSteward.Plugin/PluginVersionInfo.cs @@ -0,0 +1,28 @@ +using System.Reflection; + +namespace SimSteward.Plugin +{ + /// Build identity for SimHub UI, WebSocket state, and deploy verification. + public static class PluginVersionInfo + { + private static readonly string DisplayValue = ComputeDisplay(); + + /// Informational version (semver+git) or assembly file version. + public static string Display => DisplayValue; + + private static string ComputeDisplay() + { + var asm = typeof(PluginVersionInfo).Assembly; + var infoAttrs = asm.GetCustomAttributes(typeof(AssemblyInformationalVersionAttribute), false); + if (infoAttrs != null && infoAttrs.Length > 0) + { + var v = ((AssemblyInformationalVersionAttribute)infoAttrs[0]).InformationalVersion; + if (!string.IsNullOrWhiteSpace(v)) + return v.Trim(); + } + + var ver = asm.GetName().Version; + return ver != null ? 
ver.ToString() : "0.0.0.0"; + } + } +} diff --git a/src/SimSteward.Plugin/SimSteward.Plugin.csproj b/src/SimSteward.Plugin/SimSteward.Plugin.csproj index 7b48669..fe32a45 100644 --- a/src/SimSteward.Plugin/SimSteward.Plugin.csproj +++ b/src/SimSteward.Plugin/SimSteward.Plugin.csproj @@ -8,6 +8,7 @@ ..\..\bin\Plugin\ true true + 1.0.1 @@ -32,6 +33,9 @@ + + + diff --git a/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs b/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs new file mode 100644 index 0000000..ccbc586 --- /dev/null +++ b/src/SimSteward.Plugin/SimStewardPlugin.DataCaptureSuite.cs @@ -0,0 +1,1808 @@ +#if SIMHUB_SDK +using System; +using System.Collections; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using IRSDKSharper; +using Sentry; + +namespace SimSteward.Plugin +{ + public partial class SimStewardPlugin + { + // ── Internal step enum ──────────────────────────────────────────────── + private enum SuiteInternalStep + { + T0_Rewind, T0_FrameZero, T0_ScanCooldown, T0_SeekCapture, T0_CaptureSettle, + T1_Rewind, T1_FrameZero, T1_Sweep, + T2, T3, T4, + T5_Switch, T5_Settle, + T5b_Seek, T5b_Cycle, T5b_Settle, + T6, + T7_Rewind, T7_FrameZero, T7_Cooldown, + T8_Trigger, T8_Poll, + TDISC_Seek, TDISC_Settle, TDISC_Capture, + Done + } + + // ── Suite fields ────────────────────────────────────────────────────── + private DataCaptureSuitePhase _suitePhase = DataCaptureSuitePhase.Idle; + private SuiteInternalStep _suiteStep = SuiteInternalStep.T0_Rewind; + private string _suiteTestRunId; + private Stopwatch _suiteStopwatch; + private DateTime _suiteEmitCompleteUtc; + private volatile bool _suiteCancelRequested; + private volatile bool _suiteStartRequested; + private string _lokiReadUrl; + private DataCaptureSuiteTestResult[] _suiteResults; + + // ── Skip list ──────────────────────────────────────────────────────── + private HashSet _suiteSkipList = new HashSet(StringComparer.OrdinalIgnoreCase); + + // ── 
Preflight ──────────────────────────────────────────────────────── + private enum PreflightStep + { + Idle, + Level1_Check, + Level2_SeekEnd, Level2_SettleEnd, Level2_SeekRestore, Level2_SettleRestore, + Level3_EmitProbe, Level3_WaitProbe, Level3_QueryProbe, + Complete + } + private volatile bool _preflightRequested; + private PreflightSnapshot _preflightSnapshot = new PreflightSnapshot(); + private PreflightStep _preflightStep = PreflightStep.Idle; + private int _preflightSavedFrame; + private int _preflightSettleTicks; + private int _preflightLevel; // 0=not run, 1-3 + private string _preflightCorrelationId; + private string _preflightReplayScope = "full"; + private string _preflightProbeNonce; + private int _preflightProbeWaitTicks; + private long _preflightProbeEmitNs; + private volatile int _preflightLokiProbeResult = -2; // -2=not started, -1=error, 0+=count + private string _suitePreflightCorrelationId; + + // T0 scan/select/capture + private List<(int frame, int lap, int carIdx)> _suiteScanCandidates; + private int _suiteFirstScanFrame; + private int[] _suiteSelectedFrames; + private int _suiteCaptureIdx; + private int _suiteCaptureTicks; + + // T_60Hz: high-rate capture + private bool _suite60HzEnabled; + private HighRateTelemetryRecorder _suite60HzRecorder; + + // T_DISC: data discovery + private int _suiteDiscPositionIdx; + private int[] _suiteDiscTargetFrames; + private int _suiteDiscSettleTicks; + + // T0 / T7 shared: ground truth + seek state + private GroundTruthIncident[] _suiteGroundTruth; + private int _suiteGroundTruthIdx; + private GroundTruthIncident[] _suiteReseekCapture; + private int _suiteReseekIdx; + private int _suiteSeekCooldownTicks; + private int _suiteFrameZeroConsecutive; + private int _suiteSeekTimeoutTicks; + + // T1: speed sweep + private int _suiteSpeedSweepIdx; + private int _suiteSpeedSweepTicks; + private int _suiteSpeedSweepFrameTarget; + private int _suiteSpeedSweepDetected; + private int _suiteSpeedSweepGtHits; + private 
int[] _suiteSpeedSweepBaselineFlags; + + // T5b: camera cycle + private List<(int groupNum, string groupName)> _suiteCameraGroups; + private int _suiteCameraGroupIdx; + private int _suiteCamSettleTicks; + private int _suiteCamConfirmedMatches; + private readonly List _suiteCamGroupsVisited = new List(); + + // T8: FF sweep + private bool _suiteFfSweepTriggered; + private int _suiteT8PollTicks; + private bool _suiteT8BuildWasRunning; + + // ── Public entry points (called from DataUpdate / DispatchAction) ────── + + private void TryStartDataCaptureSuite(string[] skipIds = null) + { + if (!_preflightSnapshot.AllPassed) + { + _logger?.Warn("DataCaptureSuite: cannot start — preflight not passed."); + return; + } + if (_irsdk == null || !_irsdk.IsConnected) + { + _logger?.Warn("DataCaptureSuite: cannot start — iRacing not connected."); + return; + } + string simMode = _irsdk.Data?.SessionInfo?.WeekendInfo?.SimMode ?? ""; + if (!string.Equals(simMode, "replay", StringComparison.OrdinalIgnoreCase)) + { + _logger?.Warn("DataCaptureSuite: cannot start — not in replay mode."); + return; + } + _suiteSkipList = new HashSet(skipIds ?? Array.Empty(), StringComparer.OrdinalIgnoreCase); + // T7 depends on T0 ground truth — auto-skip if T0 is skipped + if (_suiteSkipList.Contains("T0")) _suiteSkipList.Add("T7"); + _suiteStartRequested = true; + } + + /// Called every telemetry tick from OnIrsdkTelemetryDataForReplayIndex. 
+ private void ProcessDataCaptureSuiteTick() + { + // ── Preflight (independent of suite phase) ── + if (_preflightRequested) + { + _preflightRequested = false; + // Force-restart even if a previous run got stuck in an intermediate step + _preflightStep = PreflightStep.Idle; + _preflightLevel = 0; + _preflightCorrelationId = null; + _preflightSnapshot = new PreflightSnapshot(); + try + { + BeginPreflight(); + } + catch (Exception ex) + { + SentrySdk.CaptureException(ex); + _preflightSnapshot.Phase = "error"; + _preflightSnapshot.Error = "BeginPreflight: " + ex.GetType().Name + ": " + ex.Message; + _preflightStep = PreflightStep.Complete; + } + } + if (_preflightStep != PreflightStep.Idle && _preflightStep != PreflightStep.Complete) + { + try { TickPreflight(); } + catch (Exception ex) + { + SentrySdk.CaptureException(ex); + _preflightSnapshot.Phase = "error"; + _preflightSnapshot.Error = "TickPreflight@" + _preflightStep + ": " + ex.GetType().Name + ": " + ex.Message; + _preflightStep = PreflightStep.Complete; + } + } + + if (_suitePhase == DataCaptureSuitePhase.Idle && !_suiteStartRequested && !_suiteCancelRequested) + return; + + if (_suiteCancelRequested) + { + _suiteCancelRequested = false; + if (_suitePhase != DataCaptureSuitePhase.Idle) + { + try { _irsdk?.ReplaySetPlaySpeed(1, false); } catch { } + _suiteStopwatch?.Stop(); + _suite60HzRecorder?.Dispose(); + _suite60HzRecorder = null; + StopReplayIncidentIndexRecordModeLocked("suite_cancel"); + EmitSuiteLifecycleEvent("sdk_capture_suite_cancelled", "Suite cancelled.", "T_cancel"); + _suitePhase = DataCaptureSuitePhase.Cancelled; + } + return; + } + + if (_suitePhase == DataCaptureSuitePhase.Idle && _suiteStartRequested) + { + _suiteStartRequested = false; + BeginDataCaptureSuite(); + return; + } + + if (_suitePhase == DataCaptureSuitePhase.Running) + { + TickSuiteRunning(); + return; + } + + if (_suitePhase == DataCaptureSuitePhase.AwaitingLoki) + TickAwaitingLoki(); + } + + public DataCaptureSuiteSnapshot 
BuildDataCaptureSuiteSnapshot() + { + var snap = new DataCaptureSuiteSnapshot + { + Phase = _suitePhase.ToString().ToLower(), + TestRunId = _suiteTestRunId ?? "", + ElapsedMs = _suiteStopwatch?.ElapsedMilliseconds ?? 0, + TestResults = _suiteResults, + CurrentStep = (int)_suiteStep, + CurrentStepName = _suitePhase == DataCaptureSuitePhase.Running + ? _suiteStep.ToString() + : _suitePhase.ToString().ToLower(), + }; + + if (!string.IsNullOrEmpty(_suiteTestRunId) && !string.IsNullOrEmpty(_grafanaBaseUrl)) + { + snap.GrafanaExploreUrl = LokiQueryClient.BuildGrafanaExploreUrl(_grafanaBaseUrl, _suiteTestRunId); + if (_suiteResults != null) + { + foreach (var r in _suiteResults) + { + if (!string.IsNullOrEmpty(r.EventName)) + r.GrafanaEventUrl = LokiQueryClient.BuildGrafanaExploreUrl(_grafanaBaseUrl, _suiteTestRunId, r.EventName); + } + } + } + + // Selected incidents summary for dashboard Test Cases panel + if (_suiteGroundTruth != null) + { + var summaries = new List(); + for (int i = 0; i < _suiteGroundTruth.Length; i++) + { + var gt = _suiteGroundTruth[i]; + if (gt == null) continue; + string reason = "first_available"; + if (_suiteSelectedFrames != null && i < _suiteSelectedFrames.Length) + { + // Determine selection reason based on scan candidates + var usedLaps = new HashSet(); + for (int j = 0; j < i; j++) + { + if (_suiteGroundTruth[j] != null) usedLaps.Add(_suiteGroundTruth[j].LapNum); + } + reason = gt.LapNum > DataCaptureSuiteConstants.T0_MinLapForSelection && !usedLaps.Contains(gt.LapNum) + ? 
"different_lap" : "fallback"; + } + summaries.Add(new SelectedIncidentSummary + { + Index = i, + Frame = gt.ReplayFrameNum, + Lap = gt.LapNum, + DriverName = gt.DriverName, + CarNumber = gt.CarNumber, + CustId = gt.CustId, + Reason = reason + }); + } + if (summaries.Count > 0) + snap.SelectedIncidents = summaries.ToArray(); + } + + return snap; + } + + // ── Skip helper ────────────────────────────────────────────────────── + + private bool TrySkipTest(string testId, SuiteInternalStep nextStep) + { + if (!_suiteSkipList.Contains(testId)) return false; + var r = SuiteResult(testId); + if (r != null) r.Status = "skip"; + _suiteStep = nextStep; + return true; + } + + // ── Preflight state machine ─────────────────────────────────────────── + + private static PreflightMiniTest[] BuildPreflightMiniTests() + { + return new[] + { + new PreflightMiniTest { Id = "PC_WS", Name = "WebSocket connected", Level = 1 }, + new PreflightMiniTest { Id = "PC_PLUGIN", Name = "Plugin responding", Level = 1 }, + new PreflightMiniTest { Id = "PC_SIMHUB", Name = "SimHub HTTP server", Level = 1 }, + new PreflightMiniTest { Id = "PC_GRAFANA", Name = "Grafana/Loki configured", Level = 1 }, + new PreflightMiniTest { Id = "PC_IRACING", Name = "iRacing connected", Level = 1 }, + new PreflightMiniTest { Id = "PC_REPLAY", Name = "Replay mode active", Level = 1 }, + new PreflightMiniTest { Id = "PC_SESSIONS", Name = "Session map", Level = 1 }, + new PreflightMiniTest { Id = "PC_CHECKERED", Name = "Session completed", Level = 2 }, + new PreflightMiniTest { Id = "PC_RESULTS", Name = "Results populated", Level = 2 }, + new PreflightMiniTest { Id = "PC_LOKI_RT", Name = "Loki roundtrip", Level = 3 }, + }; + } + + private PreflightMiniTest PfTest(string id) => + Array.Find(_preflightSnapshot.MiniTests ?? 
Array.Empty(), t => t.Id == id); + + private void BeginPreflight() + { + // Always run all levels in one pass + int targetLevel = 3; + + // Generate correlation ID on first run or reset + if (string.IsNullOrEmpty(_preflightCorrelationId)) + _preflightCorrelationId = Guid.NewGuid().ToString("D"); + + // Build mini-tests (keep existing results for lower levels if re-running) + if (_preflightSnapshot.MiniTests == null || _preflightLevel == 0) + _preflightSnapshot.MiniTests = BuildPreflightMiniTests(); + + _preflightSnapshot.Phase = "running"; + _preflightSnapshot.CorrelationId = _preflightCorrelationId; + _preflightSnapshot.ReplayScope = _preflightReplayScope; + _preflightSavedFrame = SafeGetInt("ReplayFrameNum"); + _preflightSettleTicks = 0; + _preflightLevel = targetLevel; + _preflightSnapshot.Level = targetLevel; + + // Mark tests at current level as "running", deeper levels as "pending" + foreach (var t in _preflightSnapshot.MiniTests) + { + if (t.Level == targetLevel) t.Status = "running"; + else if (t.Level > targetLevel) t.Status = "pending"; + // Keep lower-level results as-is + } + + _preflightStep = PreflightStep.Level1_Check; + } + + private void TickPreflight() + { + switch (_preflightStep) + { + // ── Level 1: passive checks ────────────────────────────────────── + case PreflightStep.Level1_Check: + { + bool irsdkOk = _irsdk != null && _irsdk.IsConnected; + string simMode = ""; + try { simMode = _irsdk?.Data?.SessionInfo?.WeekendInfo?.SimMode ?? ""; } catch { } + bool replayOk = string.Equals(simMode, "replay", StringComparison.OrdinalIgnoreCase); + + SetPfTest("PC_WS", true, "Plugin-side always true"); // WS is checked dashboard-side; plugin always passes + SetPfTest("PC_PLUGIN", true, "Plugin responding"); + SetPfTest("PC_SIMHUB", _simHubHttpListening, _simHubHttpListening ? "HTTP 8888 listening" : "HTTP 8888 not detected"); + SetPfTest("PC_GRAFANA", !string.IsNullOrEmpty(_lokiBaseUrl), string.IsNullOrEmpty(_lokiBaseUrl) ? 
"lokiBaseUrl not set" : _lokiBaseUrl); + SetPfTest("PC_IRACING", irsdkOk, irsdkOk ? "SDK connected" : "SDK not connected"); + SetPfTest("PC_REPLAY", replayOk, replayOk ? "SimMode=replay" : "SimMode=" + simMode); + + // Session map from YAML + var sessionList = ReadSessionListFromYaml(); + _preflightSnapshot.Sessions = sessionList; + _preflightSnapshot.ReplayFrameTotal = _replayFrameTotal; + bool hasSessions = sessionList != null && sessionList.Length > 0; + SetPfTest("PC_SESSIONS", hasSessions, + hasSessions ? sessionList.Length + " session(s): " + string.Join(", ", sessionList.Select(s => s.SessionType)) + : "No sessions found in YAML"); + + // Legacy flat fields + _preflightSnapshot.SimHubOk = _simHubHttpListening; + _preflightSnapshot.GrafanaOk = !string.IsNullOrEmpty(_lokiBaseUrl); + + if (_preflightLevel == 1) + { + CompletePreflight(); + return; + } + + // Check L1 pass — if any L1 test failed, stop here + if (!AllLevelPassed(1)) + { + CompletePreflight(); + return; + } + + // Mark L2 tests as running + foreach (var t in _preflightSnapshot.MiniTests) + if (t.Level == 2) t.Status = "running"; + + // Handle partial replay scope — skip L2 seek checks + if (_preflightReplayScope == "partial") + { + SetPfTest("PC_CHECKERED", true, "skip"); + PfTest("PC_CHECKERED").Status = "skip"; + SetPfTest("PC_RESULTS", true, "skip"); + PfTest("PC_RESULTS").Status = "skip"; + + if (_preflightLevel == 2) + { + CompletePreflight(); + return; + } + // Jump to L3 + foreach (var t in _preflightSnapshot.MiniTests) + if (t.Level == 3) t.Status = "running"; + _preflightStep = PreflightStep.Level3_EmitProbe; + return; + } + + // Seek to near-end of replay for L2 + int seekTarget = Math.Max(0, _replayFrameTotal - 10); + try + { + _irsdk.ReplaySetPlaySpeed(1, false); + _irsdk.ReplaySetPlayPosition(IRacingSdkEnum.RpyPosMode.Begin, seekTarget); + } + catch (Exception ex) + { + SentrySdk.CaptureException(ex); + SetPfTest("PC_CHECKERED", false, "seek_failed: " + ex.Message); + 
SetPfTest("PC_RESULTS", false, "seek_failed"); + _preflightSnapshot.Error = "seek_failed: " + ex.Message; + CompletePreflight(); + return; + } + _preflightStep = PreflightStep.Level2_SettleEnd; + break; + } + + // ── Level 2: seek to end, read session state ───────────────────── + case PreflightStep.Level2_SettleEnd: + { + _preflightSettleTicks++; + int frame = SafeGetInt("ReplayFrameNum"); + int seekTarget = Math.Max(0, _replayFrameTotal - 10); + if (Math.Abs(frame - seekTarget) <= 30 || _preflightSettleTicks > 300) + { + int sessionState = 0; + try { sessionState = _irsdk.Data.GetInt("SessionState"); } catch { } + bool checkeredOk = sessionState >= 6; + bool resultsOk = CheckResultsPositionsPopulated(); + + _preflightSnapshot.SessionStateAtEnd = sessionState; + _preflightSnapshot.CheckeredOk = checkeredOk; + _preflightSnapshot.ResultsPopulated = resultsOk; + + SetPfTest("PC_CHECKERED", checkeredOk, checkeredOk ? "SessionState=" + sessionState : "SessionState=" + sessionState + " (need >=6)"); + SetPfTest("PC_RESULTS", resultsOk, resultsOk ? 
"ResultsPositions found" : "No ResultsPositions"); + + // Restore saved frame + try { _irsdk.ReplaySetPlayPosition(IRacingSdkEnum.RpyPosMode.Begin, _preflightSavedFrame); } + catch { } + _preflightSettleTicks = 0; + _preflightStep = PreflightStep.Level2_SettleRestore; + } + break; + } + + case PreflightStep.Level2_SettleRestore: + { + _preflightSettleTicks++; + if (_preflightSettleTicks > 10) + { + if (_preflightLevel == 2 || !AllLevelPassed(2)) + { + CompletePreflight(); + return; + } + // Advance to L3 + foreach (var t in _preflightSnapshot.MiniTests) + if (t.Level == 3) t.Status = "running"; + _preflightStep = PreflightStep.Level3_EmitProbe; + } + break; + } + + // ── Level 3: Loki roundtrip probe ──────────────────────────────── + case PreflightStep.Level3_EmitProbe: + { + _preflightProbeNonce = Guid.NewGuid().ToString("N").Substring(0, 12); + _preflightProbeEmitNs = LokiQueryClient.NowNs(); + + // Emit probe event to Loki + var fields = new Dictionary + { + ["preflight_correlation_id"] = _preflightCorrelationId, + ["probe_nonce"] = _preflightProbeNonce, + ["domain"] = "test", + ["testing"] = "true", + }; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", + DataCaptureSuiteConstants.EventPreflightProbe, + "Preflight probe for Loki roundtrip check.", fields, "test", null); + + _preflightProbeWaitTicks = 0; + _preflightStep = PreflightStep.Level3_WaitProbe; + break; + } + + case PreflightStep.Level3_WaitProbe: + { + _preflightProbeWaitTicks++; + // Wait ~3 seconds (180 ticks at 60Hz) for Loki ingestion + if (_preflightProbeWaitTicks >= 180) + { + _preflightLokiProbeResult = -2; // reset + string nonce = _preflightProbeNonce; + string lokiUrl = _lokiReadUrl ?? _lokiBaseUrl; + long startNs = _preflightProbeEmitNs; + long endNs = LokiQueryClient.NowNs(); + string user = Environment.GetEnvironmentVariable("SIMSTEWARD_LOKI_USER")?.Trim() ?? 
""; + string pass = Environment.GetEnvironmentVariable("CURSOR_ELEVATED_GRAFANA_TOKEN")?.Trim() ?? ""; + + System.Threading.Tasks.Task.Run(async () => + { + try + { + string logql = $"{{app=\"sim-steward\"}}|json|probe_nonce=\"{nonce}\""; + int count = await LokiQueryClient.CountMatchingAsync(lokiUrl, logql, startNs, endNs, user, pass).ConfigureAwait(false); + _preflightLokiProbeResult = count; + } + catch + { + _preflightLokiProbeResult = -1; + } + }); + _preflightStep = PreflightStep.Level3_QueryProbe; + } + break; + } + + case PreflightStep.Level3_QueryProbe: + { + int result = _preflightLokiProbeResult; + if (result == -2) return; // still waiting for async Task + + bool ok = result > 0; + string detail = result == -1 ? "Loki query error" + : result == 0 ? "Probe not found in Loki" + : $"Probe found ({result} match)"; + SetPfTest("PC_LOKI_RT", ok, detail); + CompletePreflight(); + break; + } + } + } + + private void SetPfTest(string id, bool pass, string detail) + { + var t = PfTest(id); + if (t == null) return; + // Don't overwrite a "skip" status + if (t.Status == "skip") return; + t.Status = pass ? 
"pass" : "fail"; + t.Detail = detail; + } + + private bool AllLevelPassed(int level) + { + if (_preflightSnapshot.MiniTests == null) return false; + foreach (var t in _preflightSnapshot.MiniTests) + { + if (t.Level > level) continue; + if (t.Status != "pass" && t.Status != "skip") return false; + } + return true; + } + + private void CompletePreflight() + { + // Determine allPassed: all tests at completed levels must be pass or skip + bool allPassed = true; + foreach (var t in _preflightSnapshot.MiniTests) + { + if (t.Level > _preflightLevel) continue; + if (t.Status != "pass" && t.Status != "skip") { allPassed = false; break; } + } + _preflightSnapshot.AllPassed = allPassed; + _preflightSnapshot.Phase = "complete"; + _preflightStep = PreflightStep.Complete; + + // Emit structured log + var fields = new Dictionary + { + ["preflight_correlation_id"] = _preflightCorrelationId ?? "", + ["level"] = _preflightLevel, + ["replay_scope"] = _preflightReplayScope, + ["all_passed"] = allPassed, + ["domain"] = "test", + ["testing"] = "true", + }; + foreach (var t in _preflightSnapshot.MiniTests) + { + if (t.Level <= _preflightLevel) + fields["pc_" + t.Id.ToLower()] = t.Status; + } + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", + DataCaptureSuiteConstants.EventPreflightCheck, + $"Preflight L{_preflightLevel} complete. 
all_passed={allPassed}", fields, "test", null); + } + + private bool CheckResultsPositionsPopulated() + { + try + { + var sessionInfo = _irsdk?.Data?.SessionInfo; + if (!(sessionInfo?.SessionInfo?.Sessions is IList list)) return false; + foreach (var o in list) + { + if (o == null) continue; + var t = o.GetType(); + var typeProp = t.GetProperty("SessionType"); + if (!string.Equals(typeProp?.GetValue(o)?.ToString(), "Race", StringComparison.OrdinalIgnoreCase)) + continue; + var resultsProp = t.GetProperty("ResultsPositions"); + var results = resultsProp?.GetValue(o); + if (results is IList resultsList && resultsList.Count > 0) return true; + } + } + catch { } + return false; + } + + // ── Suite init ──────────────────────────────────────────────────────── + + private void InitSuiteResults() + { + _suiteResults = new[] + { + new DataCaptureSuiteTestResult { TestId = "T0", Name = "Ground Truth Capture", EventName = DataCaptureSuiteConstants.EventGroundTruth }, + new DataCaptureSuiteTestResult { TestId = "T1", Name = "Speed Sweep Detection", EventName = DataCaptureSuiteConstants.EventSpeedSample }, + new DataCaptureSuiteTestResult { TestId = "T2", Name = "Variable Inventory", EventName = DataCaptureSuiteConstants.EventVariableInventory }, + new DataCaptureSuiteTestResult { TestId = "T3", Name = "Player Data Snapshot", EventName = DataCaptureSuiteConstants.EventPlayerSnapshot }, + new DataCaptureSuiteTestResult { TestId = "T4", Name = "Driver Roster", EventName = DataCaptureSuiteConstants.EventDriverRoster }, + new DataCaptureSuiteTestResult { TestId = "T5", Name = "Camera Switch", EventName = DataCaptureSuiteConstants.EventCameraSwitchDriver }, + new DataCaptureSuiteTestResult { TestId = "T5b", Name = "Camera View Cycle", EventName = DataCaptureSuiteConstants.EventCameraViewSample }, + new DataCaptureSuiteTestResult { TestId = "T6", Name = "Session Results", EventName = DataCaptureSuiteConstants.EventSessionResults }, + new DataCaptureSuiteTestResult { TestId = "T7", 
Name = "Incident Re-Seek", EventName = DataCaptureSuiteConstants.EventIncidentReseek }, + new DataCaptureSuiteTestResult { TestId = "T8", Name = "FF Sweep", EventName = DataCaptureSuiteConstants.EventFfSweepResult }, + new DataCaptureSuiteTestResult { TestId = "T_DISC", Name = "Data Point Discovery", EventName = DataCaptureSuiteConstants.EventDataDiscovery }, + }; + + // Append T_60Hz only when feature flag is set + if (_suite60HzEnabled) + { + var list = new List(_suiteResults); + list.Add(new DataCaptureSuiteTestResult { TestId = "T_60Hz", Name = "60Hz Telemetry Dump", EventName = DataCaptureSuiteConstants.Event60HzSummary }); + _suiteResults = list.ToArray(); + } + } + + private DataCaptureSuiteTestResult SuiteResult(string id) + => Array.Find(_suiteResults, r => r.TestId == id); + + private void BeginDataCaptureSuite() + { + _suiteTestRunId = Guid.NewGuid().ToString("D"); + _suitePreflightCorrelationId = _preflightCorrelationId ?? ""; + _suiteStopwatch = Stopwatch.StartNew(); + _suiteGroundTruth = new GroundTruthIncident[3]; + _suiteGroundTruthIdx = 0; + _suiteReseekCapture = new GroundTruthIncident[3]; + _suiteReseekIdx = 0; + _suiteSpeedSweepIdx = 0; + _suiteFfSweepTriggered = false; + _suiteT8PollTicks = 0; + _suiteT8BuildWasRunning = false; + _suiteCamGroupsVisited.Clear(); + _lokiReadUrl = _lokiBaseUrl; + _suiteDiscPositionIdx = 0; + _suiteDiscTargetFrames = null; + + // 60Hz feature flag + _suite60HzEnabled = string.Equals( + Environment.GetEnvironmentVariable("SIMSTEWARD_60HZ_TEST_CAPTURE")?.Trim(), "1"); + _suite60HzRecorder?.Dispose(); + _suite60HzRecorder = null; + if (_suite60HzEnabled) + _suite60HzRecorder = new HighRateTelemetryRecorder(_suiteTestRunId, _pluginDataPath); + + InitSuiteResults(); + + _suiteStep = SuiteInternalStep.T0_Rewind; + _suitePhase = DataCaptureSuitePhase.Running; + + EmitSuiteLifecycleEvent(DataCaptureSuiteConstants.EventSuiteStarted, + $"Data capture suite started. 
test_run_id={_suiteTestRunId}", "T_start"); + SentrySdk.AddBreadcrumb("Data capture suite started", "lifecycle", + data: new Dictionary { ["test_run_id"] = _suiteTestRunId }); + _logger?.Info($"DataCaptureSuite started. test_run_id={_suiteTestRunId}"); + } + + // ── Main tick dispatcher ────────────────────────────────────────────── + + private void TickSuiteRunning() + { + switch (_suiteStep) + { + case SuiteInternalStep.T0_Rewind: TickT0_Rewind(); break; + case SuiteInternalStep.T0_FrameZero: TickT0_FrameZero(); break; + case SuiteInternalStep.T0_ScanCooldown: TickT0_ScanCooldown(); break; + case SuiteInternalStep.T0_SeekCapture: TickT0_SeekCapture(); break; + case SuiteInternalStep.T0_CaptureSettle: TickT0_CaptureSettle(); break; + case SuiteInternalStep.T1_Rewind: TickT1_Rewind(); break; + case SuiteInternalStep.T1_FrameZero: TickT1_FrameZero(); break; + case SuiteInternalStep.T1_Sweep: TickT1_Sweep(); break; + case SuiteInternalStep.T2: TickT2(); break; + case SuiteInternalStep.T3: TickT3(); break; + case SuiteInternalStep.T4: TickT4(); break; + case SuiteInternalStep.T5_Switch: TickT5_Switch(); break; + case SuiteInternalStep.T5_Settle: TickT5_Settle(); break; + case SuiteInternalStep.T5b_Seek: TickT5b_Seek(); break; + case SuiteInternalStep.T5b_Cycle: TickT5b_Cycle(); break; + case SuiteInternalStep.T5b_Settle: TickT5b_Settle(); break; + case SuiteInternalStep.T6: TickT6(); break; + case SuiteInternalStep.T7_Rewind: TickT7_Rewind(); break; + case SuiteInternalStep.T7_FrameZero: TickT7_FrameZero(); break; + case SuiteInternalStep.T7_Cooldown: TickT7_Cooldown(); break; + case SuiteInternalStep.T8_Trigger: TickT8_Trigger(); break; + case SuiteInternalStep.T8_Poll: TickT8_Poll(); break; + case SuiteInternalStep.TDISC_Seek: TickTDISC_Seek(); break; + case SuiteInternalStep.TDISC_Settle: TickTDISC_Settle(); break; + case SuiteInternalStep.TDISC_Capture: TickTDISC_Capture(); break; + case SuiteInternalStep.Done: TransitionToLoki(); break; + } + + // 60Hz recording: 
every tick while running + _suite60HzRecorder?.RecordTick(_irsdk); + } + + // ── T0: Ground Truth Capture — two-pass scan/select/capture ────────── + + private void TickT0_Rewind() + { + if (TrySkipTest("T0", SuiteInternalStep.T1_Rewind)) return; + SuiteResult("T0").Status = "pending"; + try + { + _irsdk.ReplaySetPlaySpeed(1, false); + _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.ToStart); + } + catch (Exception ex) + { + _logger?.Warn("DataCaptureSuite T0 rewind: " + ex.Message); + } + + StartReplayIncidentIndexRecordModeLocked("suite_t0"); + _suiteScanCandidates = new List<(int, int, int)>(); + _suiteFirstScanFrame = -1; + _suiteFrameZeroConsecutive = 0; + _suiteSeekTimeoutTicks = 0; + _suiteStep = SuiteInternalStep.T0_FrameZero; + } + + private void TickT0_FrameZero() + { + _suiteSeekTimeoutTicks++; + if (_suiteSeekTimeoutTicks > DataCaptureSuiteConstants.SeekTimeoutTicks) + { + SuiteResult("T0").Status = "fail"; + SuiteResult("T0").Error = "frame_zero_timeout"; + StopReplayIncidentIndexRecordModeLocked("suite_t0_timeout"); + StartT1Rewind(0); + return; + } + + int frame = SafeGetInt("ReplayFrameNum"); + if (frame <= 2) _suiteFrameZeroConsecutive++; + else _suiteFrameZeroConsecutive = 0; + + if (_suiteFrameZeroConsecutive < DataCaptureSuiteConstants.FrameZeroStableTicks) return; + + // Frame zero stable — begin incident scan + _suiteSeekCooldownTicks = DataCaptureSuiteConstants.NextIncidentCooldownTicks; + try { _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.NextIncident); } catch { } + _suiteStep = SuiteInternalStep.T0_ScanCooldown; + } + + private void TickT0_ScanCooldown() + { + if (--_suiteSeekCooldownTicks > 0) return; + + int frame = SafeGetInt("ReplayFrameNum"); + int camCarIdx = SafeGetInt("CamCarIdx"); + int lap = -1; + try { lap = _irsdk.Data.GetInt("CarIdxLap", camCarIdx); } catch { } + + // Detect wraparound: if we've looped back near the first scanned frame + if (_suiteFirstScanFrame < 0) _suiteFirstScanFrame = frame; + bool wrapped = 
_suiteScanCandidates.Count > 0 && frame <= _suiteFirstScanFrame + DataCaptureSuiteConstants.T0_SeekSettleTolerance; + + if (!wrapped) + _suiteScanCandidates.Add((frame, lap, camCarIdx)); + + // Stop scanning if wrapped or hit max + if (wrapped || _suiteScanCandidates.Count >= DataCaptureSuiteConstants.T0_ScanMaxIncidents) + { + // Select best 3 incidents + _suiteSelectedFrames = SelectGroundTruthFrames(_suiteScanCandidates); + if (_suiteSelectedFrames.Length == 0) + { + SuiteResult("T0").Status = "fail"; + SuiteResult("T0").Error = "no_incidents_found"; + StopReplayIncidentIndexRecordModeLocked("suite_t0_no_incidents"); + StartT1Rewind(0); + return; + } + _suiteGroundTruthIdx = 0; + _suiteCaptureIdx = 0; + _suiteStep = SuiteInternalStep.T0_SeekCapture; + return; + } + + // Scan next incident + _suiteSeekCooldownTicks = DataCaptureSuiteConstants.NextIncidentCooldownTicks; + try { _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.NextIncident); } catch { } + } + + private void TickT0_SeekCapture() + { + if (_suiteCaptureIdx >= _suiteSelectedFrames.Length) + { + FinishT0Capture(); + return; + } + try { _irsdk.ReplaySetPlayPosition(IRacingSdkEnum.RpyPosMode.Begin, _suiteSelectedFrames[_suiteCaptureIdx]); } + catch { } + _suiteCaptureTicks = 0; + _suiteStep = SuiteInternalStep.T0_CaptureSettle; + } + + private void TickT0_CaptureSettle() + { + _suiteCaptureTicks++; + int frame = SafeGetInt("ReplayFrameNum"); + int target = _suiteSelectedFrames[_suiteCaptureIdx]; + + if (Math.Abs(frame - target) <= DataCaptureSuiteConstants.T0_SeekSettleTolerance || _suiteCaptureTicks > DataCaptureSuiteConstants.SeekTimeoutTicks) + { + CaptureGroundTruthIncident(_suiteCaptureIdx); + _suiteCaptureIdx++; + _suiteStep = SuiteInternalStep.T0_SeekCapture; + } + } + + private void FinishT0Capture() + { + int captured = Math.Min(_suiteCaptureIdx, _suiteGroundTruth.Length); + SuiteResult("T0").Status = "emitted"; + SuiteResult("T0").KpiLabel = "incidents_captured"; + SuiteResult("T0").KpiValue = 
captured.ToString(); + StopReplayIncidentIndexRecordModeLocked("suite_t0_done"); + StartT1Rewind(0); + } + + private static int[] SelectGroundTruthFrames(List<(int frame, int lap, int carIdx)> candidates) + => DataCaptureSuiteSelection.SelectGroundTruthFrames(candidates); + + private void CaptureGroundTruthIncident(int idx) + { + int camCarIdx = SafeGetInt("CamCarIdx"); + int frame = SafeGetInt("ReplayFrameNum"); + double rst = 0; + try { rst = _irsdk.Data.GetDouble("ReplaySessionTime"); } catch { } + + var flags = new int[ReplayIncidentIndexBuild.CarSlotCount]; + for (int i = 0; i < flags.Length; i++) + { + try { flags[i] = _irsdk.Data.GetInt("CarIdxSessionFlags", i); } catch { flags[i] = 0; } + } + + int lap = -1; + float lapDistPct = 0f; + try { lap = _irsdk.Data.GetInt("CarIdxLap", camCarIdx); } catch { } + try { lapDistPct = _irsdk.Data.GetFloat("CarIdxLapDistPct", camCarIdx); } catch { } + + ResolveDriverFromCarIdx(camCarIdx, out string driverName, out string carNumber, out string custId); + + _suiteGroundTruth[idx] = new GroundTruthIncident + { + IncidentIndex = idx, + CarIdx = camCarIdx, + ReplayFrameNum = frame, + ReplaySessionTimeSec = rst, + CarIdxSessionFlagsSnapshot = flags, + DriverName = driverName, + CarNumber = carNumber, + CustId = custId, + LapDistPct = lapDistPct, + LapNum = lap + }; + + var fields = BuildTestFields("T0"); + fields["incident_index"] = idx; + fields["car_idx"] = camCarIdx; + fields["replay_frame"] = frame; + fields["replay_session_time_sec"] = rst; + fields["driver_name"] = driverName; + fields["car_number"] = carNumber; + fields["unique_user_id"] = custId; + fields["lap_dist_pct"] = lapDistPct; + fields["lap_num"] = lap; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventGroundTruth, + $"Ground truth {idx}: car_idx={camCarIdx} frame={frame}", fields, "test", null); + } + + // ── T1: Speed Sweep (per speed in [1,4,8,16]) ──────────────────────── + + private void 
StartT1Rewind(int speedIdx) + { + if (speedIdx == 0 && TrySkipTest("T1", SuiteInternalStep.T2)) return; + _suiteSpeedSweepIdx = speedIdx; + if (speedIdx >= DataCaptureSuiteConstants.SpeedSweepSpeeds.Length) + { + SuiteResult("T1").Status = "emitted"; + _suiteStep = SuiteInternalStep.T2; + return; + } + _suiteStep = SuiteInternalStep.T1_Rewind; + } + + private void TickT1_Rewind() + { + try + { + _irsdk.ReplaySetPlaySpeed(1, false); + _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.ToStart); + } + catch { } + + int speed = DataCaptureSuiteConstants.SpeedSweepSpeeds[_suiteSpeedSweepIdx]; + StartReplayIncidentIndexRecordModeLocked("suite_t1_speed_" + speed); + _suiteSpeedSweepBaselineFlags = new int[ReplayIncidentIndexBuild.CarSlotCount]; + _suiteSpeedSweepDetected = 0; + _suiteSpeedSweepGtHits = 0; + _suiteSpeedSweepTicks = 0; + _suiteFrameZeroConsecutive = 0; + _suiteSeekTimeoutTicks = 0; + _suiteStep = SuiteInternalStep.T1_FrameZero; + } + + private void TickT1_FrameZero() + { + _suiteSeekTimeoutTicks++; + if (_suiteSeekTimeoutTicks > DataCaptureSuiteConstants.SeekTimeoutTicks) + { + StopReplayIncidentIndexRecordModeLocked("suite_t1_timeout"); + StartT1Rewind(_suiteSpeedSweepIdx + 1); + return; + } + + int frame = SafeGetInt("ReplayFrameNum"); + if (frame <= 2) _suiteFrameZeroConsecutive++; + else _suiteFrameZeroConsecutive = 0; + + if (_suiteFrameZeroConsecutive < DataCaptureSuiteConstants.FrameZeroStableTicks) return; + + // Capture baseline flags + for (int i = 0; i < _suiteSpeedSweepBaselineFlags.Length; i++) + { + try { _suiteSpeedSweepBaselineFlags[i] = _irsdk.Data.GetInt("CarIdxSessionFlags", i); } + catch { _suiteSpeedSweepBaselineFlags[i] = 0; } + } + + int lastGtFrame = _suiteGroundTruth.Where(g => g != null) + .Select(g => g.ReplayFrameNum) + .DefaultIfEmpty(0).Max(); + _suiteSpeedSweepFrameTarget = lastGtFrame + DataCaptureSuiteConstants.SpeedSweepAdvanceFrames; + + int speed = DataCaptureSuiteConstants.SpeedSweepSpeeds[_suiteSpeedSweepIdx]; + try { 
_irsdk.ReplaySetPlaySpeed(speed, false); } catch { } + _suiteStep = SuiteInternalStep.T1_Sweep; + } + + private void TickT1_Sweep() + { + _suiteSpeedSweepTicks++; + int frame = SafeGetInt("ReplayFrameNum"); + + // Detect rising edges on CarIdxSessionFlags (furled or repair flag) + for (int i = 0; i < ReplayIncidentIndexBuild.CarSlotCount; i++) + { + int cur; + try { cur = _irsdk.Data.GetInt("CarIdxSessionFlags", i); } + catch { cur = _suiteSpeedSweepBaselineFlags[i]; } + + bool prevHad = (_suiteSpeedSweepBaselineFlags[i] & DataCaptureSuiteConstants.IncidentFlagMask) != 0; + bool curHas = (cur & DataCaptureSuiteConstants.IncidentFlagMask) != 0; + if (!prevHad && curHas) + { + _suiteSpeedSweepDetected++; + if (_suiteGroundTruth.Any(g => g != null && g.CarIdx == i)) + _suiteSpeedSweepGtHits++; + } + _suiteSpeedSweepBaselineFlags[i] = cur; + } + + if (frame < _suiteSpeedSweepFrameTarget) return; + + // Speed window done + int reqSpeed = DataCaptureSuiteConstants.SpeedSweepSpeeds[_suiteSpeedSweepIdx]; + double effectHz = 60.0 / reqSpeed; + int gtCount = _suiteGroundTruth.Count(g => g != null); + double detRate = gtCount > 0 ? 
_suiteSpeedSweepGtHits * 100.0 / gtCount : 0; + + var fields = BuildTestFields("T1"); + fields["requested_speed"] = reqSpeed; + fields["actual_play_speed"] = SafeGetInt("ReplayPlaySpeed"); + fields["effective_session_hz"] = Math.Round(effectHz, 4); + fields["tick_count"] = _suiteSpeedSweepTicks; + fields["incidents_detected"] = _suiteSpeedSweepDetected; + fields["ground_truth_hit_count"] = _suiteSpeedSweepGtHits; + fields["ground_truth_miss_count"] = Math.Max(0, gtCount - _suiteSpeedSweepGtHits); + fields["detection_rate_pct"] = Math.Round(detRate, 1); + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventSpeedSample, + $"Speed sweep {reqSpeed}x: det_rate={detRate:F1}% eff_hz={effectHz:F2}", fields, "test", null); + + SuiteResult("T1").KpiLabel = $"det_rate@{reqSpeed}x"; + SuiteResult("T1").KpiValue = $"{detRate:F1}%"; + + StopReplayIncidentIndexRecordModeLocked("suite_t1_speed_done"); + StartT1Rewind(_suiteSpeedSweepIdx + 1); + } + + // ── T2: Variable Inventory ──────────────────────────────────────────── + + private void TickT2() + { + if (TrySkipTest("T2", SuiteInternalStep.T3)) return; + int varCount = 0; + try + { + var props = _irsdk?.Data?.GetType().GetProperty("TelemetryDataProperties")?.GetValue(_irsdk.Data); + if (props is IEnumerable en) + foreach (var _ in en) varCount++; + } + catch { } + + var fields = BuildTestFields("T2"); + fields["variable_count"] = varCount; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventVariableInventory, + $"Variable inventory: {varCount} variables.", fields, "test", null); + + SuiteResult("T2").Status = "emitted"; + SuiteResult("T2").KpiLabel = "variable_count"; + SuiteResult("T2").KpiValue = varCount.ToString(); + _suiteStep = SuiteInternalStep.T3; + } + + // ── T3: Player Data Snapshot ────────────────────────────────────────── + + private void TickT3() + { + if (TrySkipTest("T3", 
SuiteInternalStep.T4)) return; + double speed = 0, rpm = 0; int gear = 0; float lapDistPct = 0; + try { speed = _irsdk.Data.GetDouble("Speed"); } catch { } + try { rpm = _irsdk.Data.GetDouble("RPM"); } catch { } + try { gear = _irsdk.Data.GetInt("Gear"); } catch { } + try { lapDistPct = _irsdk.Data.GetFloat("LapDistPct"); } catch { } + + var fields = BuildTestFields("T3"); + fields["speed_mps"] = speed; + fields["rpm"] = rpm; + fields["gear"] = gear; + fields["lap_dist_pct"] = lapDistPct; + fields["note"] = "player_car_only"; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventPlayerSnapshot, + $"Player snapshot: speed={speed:F1}m/s gear={gear}", fields, "test", null); + + SuiteResult("T3").Status = "emitted"; + _suiteStep = SuiteInternalStep.T4; + } + + // ── T4: Driver Roster ───────────────────────────────────────────────── + + private void TickT4() + { + if (TrySkipTest("T4", SuiteInternalStep.T5_Switch)) return; + var driverList = _irsdk?.Data?.SessionInfo?.DriverInfo?.Drivers as IList; + int driverCount = driverList?.Count ?? 0; + int gtCarsFound = 0; + if (driverList != null) + { + foreach (var d in driverList) + { + if (d == null) continue; + var t = d.GetType(); + var idxObj = t.GetProperty("CarIdx")?.GetValue(d); + int carIdx = idxObj is int ci ? ci : Convert.ToInt32(idxObj ?? 
-1); + if (_suiteGroundTruth.Any(g => g != null && g.CarIdx == carIdx)) + gtCarsFound++; + } + } + + var fields = BuildTestFields("T4"); + fields["driver_count"] = driverCount; + fields["gt_cars_found"] = gtCarsFound; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventDriverRoster, + $"Driver roster: {driverCount} drivers, {gtCarsFound} GT cars.", fields, "test", null); + + SuiteResult("T4").Status = "emitted"; + SuiteResult("T4").KpiLabel = "driver_count"; + SuiteResult("T4").KpiValue = driverCount.ToString(); + + // Seek to GT0 position for T5 camera tests + if (_suiteGroundTruth[0] != null) + { + int sessionNum = SafeGetInt("SessionNum"); + int sessionTimeMs = (int)(_suiteGroundTruth[0].ReplaySessionTimeSec * 1000); + try { _irsdk.ReplaySearchSessionTime(sessionNum, sessionTimeMs); } catch { } + } + _suiteCamSettleTicks = DataCaptureSuiteConstants.CamSettleTicks; + _suiteStep = SuiteInternalStep.T5_Switch; + } + + // ── T5: Camera Switch ───────────────────────────────────────────────── + + private void TickT5_Switch() + { + if (TrySkipTest("T5", SuiteInternalStep.T5b_Seek)) return; + if (_suiteGroundTruth[0] == null) + { + SuiteResult("T5").Status = "skip"; + SuiteResult("T5").Error = "no_ground_truth"; + _suiteStep = SuiteInternalStep.T5b_Seek; + return; + } + + try + { + _irsdk.CamSwitchPos(IRacingSdkEnum.CamSwitchMode.FocusAtDriver, + _suiteGroundTruth[0].CarIdx, 0, 0); + } + catch { } + + _suiteCamSettleTicks = DataCaptureSuiteConstants.CamSettleTicks; + _suiteStep = SuiteInternalStep.T5_Settle; + } + + private void TickT5_Settle() + { + if (--_suiteCamSettleTicks > 0) return; + + int camCarIdx = SafeGetInt("CamCarIdx"); + int camGroup = SafeGetInt("CamGroupNumber"); + if (camGroup == 0) camGroup = SafeGetInt("CameraGroupNumber"); + string camGroupName = ResolveCameraGroupNumToName(camGroup); + bool confirmed = _suiteGroundTruth[0] != null && camCarIdx == _suiteGroundTruth[0].CarIdx; + + var 
fields = BuildTestFields("T5"); + fields["cam_car_idx"] = camCarIdx; + fields["expected_car_idx"] = _suiteGroundTruth[0]?.CarIdx ?? -1; + fields["confirmed_match"] = confirmed; + fields["cam_group_num"] = camGroup; + fields["cam_group_name"] = camGroupName; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventCameraSwitchDriver, + $"Camera switch: cam_car_idx={camCarIdx} confirmed={confirmed}", fields, "test", null); + + SuiteResult("T5").Status = "emitted"; + SuiteResult("T5").KpiLabel = "confirmed"; + SuiteResult("T5").KpiValue = confirmed.ToString().ToLower(); + _suiteStep = SuiteInternalStep.T5b_Seek; + } + + // ── T5b: Camera View Cycle ──────────────────────────────────────────── + + private void TickT5b_Seek() + { + if (TrySkipTest("T5b", SuiteInternalStep.T6)) return; + if (_suiteGroundTruth[0] != null) + { + int sessionNum = SafeGetInt("SessionNum"); + int sessionTimeMs = (int)(_suiteGroundTruth[0].ReplaySessionTimeSec * 1000); + try + { + _irsdk.ReplaySearchSessionTime(sessionNum, sessionTimeMs); + _irsdk.ReplaySetPlaySpeed(0, false); + } + catch { } + } + + _suiteCameraGroups = GetAllCameraGroups(); + _suiteCameraGroupIdx = 0; + _suiteCamConfirmedMatches = 0; + _suiteCamGroupsVisited.Clear(); + + if (_suiteCameraGroups.Count == 0) + { + SuiteResult("T5b").Status = "skip"; + SuiteResult("T5b").Error = "no_camera_groups"; + _suiteStep = SuiteInternalStep.T6; + return; + } + + StartReplayIncidentIndexRecordModeLocked("suite_t5b"); + _suiteStep = SuiteInternalStep.T5b_Cycle; + } + + private void TickT5b_Cycle() + { + if (_suiteCameraGroupIdx >= _suiteCameraGroups.Count) + { + StopReplayIncidentIndexRecordModeLocked("suite_t5b_done"); + + var sf = BuildTestFields("T5b"); + sf["groups_tested"] = _suiteCameraGroups.Count; + sf["confirmed_matches"] = _suiteCamConfirmedMatches; + sf["group_names"] = _suiteCamGroupsVisited.ToArray(); + MergeSessionAndRoutingFields(sf); + _logger?.Structured("INFO", 
"simhub-plugin", DataCaptureSuiteConstants.EventCameraViewSummary, + $"Camera view cycle: {_suiteCameraGroups.Count} groups, {_suiteCamConfirmedMatches} confirmed.", sf, "test", null); + + SuiteResult("T5b").Status = "emitted"; + SuiteResult("T5b").KpiLabel = "groups_tested"; + SuiteResult("T5b").KpiValue = _suiteCameraGroups.Count.ToString(); + _suiteStep = SuiteInternalStep.T6; + return; + } + + var (groupNum, groupName) = _suiteCameraGroups[_suiteCameraGroupIdx]; + int carIdx = _suiteGroundTruth[0]?.CarIdx ?? 0; + try { _irsdk.CamSwitchPos(IRacingSdkEnum.CamSwitchMode.FocusAtDriver, carIdx, groupNum, 0); } + catch { } + + _suiteCamSettleTicks = DataCaptureSuiteConstants.CamSettleTicks; + _suiteStep = SuiteInternalStep.T5b_Settle; + } + + private void TickT5b_Settle() + { + if (--_suiteCamSettleTicks > 0) return; + + int camCarIdx = SafeGetInt("CamCarIdx"); + int camGroupNum = SafeGetInt("CamGroupNumber"); + if (camGroupNum == 0) camGroupNum = SafeGetInt("CameraGroupNumber"); + int camCamNum = SafeGetInt("CamCameraNumber"); + + var (expectedGroup, expectedGroupName) = _suiteCameraGroups[_suiteCameraGroupIdx]; + int expectedCar = _suiteGroundTruth[0]?.CarIdx ?? -1; + bool confirmed = camCarIdx == expectedCar; + if (confirmed) _suiteCamConfirmedMatches++; + _suiteCamGroupsVisited.Add(expectedGroupName); + + // Per-car arrays for GT0 car + int ci = expectedCar >= 0 ? 
expectedCar : 0; + int carLap = -1, carPos = -1, carGear = -1; float carRpm = 0, carLdp = 0; int carFlags = 0, trackSurf = -1; + try { carLap = _irsdk.Data.GetInt("CarIdxLap", ci); } catch { } + try { carPos = _irsdk.Data.GetInt("CarIdxPosition", ci); } catch { } + try { carGear = _irsdk.Data.GetInt("CarIdxGear", ci); } catch { } + try { carRpm = _irsdk.Data.GetFloat("CarIdxRPM", ci); } catch { } + try { carLdp = _irsdk.Data.GetFloat("CarIdxLapDistPct", ci); } catch { } + try { carFlags = _irsdk.Data.GetInt("CarIdxSessionFlags", ci); } catch { } + try { trackSurf= _irsdk.Data.GetInt("CarIdxTrackSurface", ci); } catch { } + + var fields = BuildTestFields("T5b"); + fields["cam_group_num"] = expectedGroup; + fields["cam_group_name"] = expectedGroupName; + fields["cam_car_idx"] = camCarIdx; + fields["cam_camera_number"] = camCamNum; + fields["confirmed_match"] = confirmed; + fields["ground_truth_incident_index"] = 0; + fields["car_idx_lap"] = carLap; + fields["car_idx_position"] = carPos; + fields["car_idx_gear"] = carGear; + fields["car_idx_rpm"] = carRpm; + fields["car_idx_lap_dist_pct"] = carLdp; + fields["car_idx_session_flags"] = carFlags; + fields["car_idx_track_surface"] = trackSurf; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventCameraViewSample, + $"Camera view sample: group={expectedGroupName} car_idx={camCarIdx}", fields, "test", null); + + _suiteCameraGroupIdx++; + _suiteStep = SuiteInternalStep.T5b_Cycle; + } + + private List<(int groupNum, string groupName)> GetAllCameraGroups() + { + var result = new List<(int, string)>(); + try + { + if (!(_irsdk?.Data?.SessionInfo?.CameraInfo?.Groups is IList groups)) return result; + foreach (var g in groups) + { + if (g == null) continue; + var gt = g.GetType(); + var numProp = gt.GetProperty("GroupNum"); + var nameProp = gt.GetProperty("GroupName"); + if (numProp == null || nameProp == null) continue; + 
result.Add((Convert.ToInt32(numProp.GetValue(g)), nameProp.GetValue(g)?.ToString() ?? "")); + } + } + catch { } + return result; + } + + // ── T6: Session Results ─────────────────────────────────────────────── + + private void TickT6() + { + if (TrySkipTest("T6", SuiteInternalStep.T7_Rewind)) return; + int subId = _irsdk?.Data?.SessionInfo?.WeekendInfo?.SubSessionID ?? 0; + int sessionNum = SafeGetInt("SessionNum"); + string yaml = _irsdk?.Data?.SessionInfoYaml ?? ""; + + ReplayIncidentIndexResultsYaml.TryParseOfficialIncidentsByCarIdx( + yaml, sessionNum, + out Dictionary byCarIdx, + out int _, + out string _); + + int gtCarsInResults = _suiteGroundTruth + .Where(g => g != null && byCarIdx != null && byCarIdx.ContainsKey(g.CarIdx)) + .Count(); + + var fields = BuildTestFields("T6"); + fields["result_entries"] = byCarIdx?.Count ?? 0; + fields["gt_cars_in_results"] = gtCarsInResults; + fields["subsession_id"] = subId; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventSessionResults, + $"Session results: {byCarIdx?.Count ?? 
0} entries, {gtCarsInResults} GT cars.", fields, "test", null); + + SuiteResult("T6").Status = "emitted"; + SuiteResult("T6").KpiLabel = "gt_cars_in_results"; + SuiteResult("T6").KpiValue = gtCarsInResults.ToString(); + + // Rewind for T7 + try + { + _irsdk.ReplaySetPlaySpeed(1, false); + _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.ToStart); + } + catch { } + _suiteFrameZeroConsecutive = 0; + _suiteSeekTimeoutTicks = 0; + _suiteReseekIdx = 0; + _suiteStep = SuiteInternalStep.T7_Rewind; + } + + // ── T7: Incident Re-Seek Validation ────────────────────────────────── + + private void TickT7_Rewind() + { + if (TrySkipTest("T7", SuiteInternalStep.T8_Trigger)) return; + // Rewind was issued in TickT6; just reset counters and wait for frame zero + _suiteFrameZeroConsecutive = 0; + _suiteSeekTimeoutTicks = 0; + _suiteStep = SuiteInternalStep.T7_FrameZero; + } + + private void TickT7_FrameZero() + { + _suiteSeekTimeoutTicks++; + if (_suiteSeekTimeoutTicks > DataCaptureSuiteConstants.SeekTimeoutTicks) + { + SuiteResult("T7").Status = "fail"; + SuiteResult("T7").Error = "frame_zero_timeout"; + _suiteStep = SuiteInternalStep.T8_Trigger; + return; + } + + int frame = SafeGetInt("ReplayFrameNum"); + if (frame <= 2) _suiteFrameZeroConsecutive++; + else _suiteFrameZeroConsecutive = 0; + + if (_suiteFrameZeroConsecutive < DataCaptureSuiteConstants.FrameZeroStableTicks) return; + + _suiteReseekIdx = 0; + _suiteSeekCooldownTicks = DataCaptureSuiteConstants.NextIncidentCooldownTicks; + try { _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.NextIncident); } catch { } + _suiteStep = SuiteInternalStep.T7_Cooldown; + } + + private void TickT7_Cooldown() + { + if (--_suiteSeekCooldownTicks > 0) return; + + int frame = SafeGetInt("ReplayFrameNum"); + int camCarIdx = SafeGetInt("CamCarIdx"); + _suiteReseekCapture[_suiteReseekIdx] = new GroundTruthIncident + { + IncidentIndex = _suiteReseekIdx, + CarIdx = camCarIdx, + ReplayFrameNum = frame, + }; + _suiteReseekIdx++; + + if (_suiteReseekIdx 
< 3) + { + _suiteSeekCooldownTicks = DataCaptureSuiteConstants.NextIncidentCooldownTicks; + try { _irsdk.ReplaySearch(IRacingSdkEnum.RpySrchMode.NextIncident); } catch { } + return; + } + + // All 3 reseeks done — compare against ground truth + int matches = 0; + for (int i = 0; i < 3; i++) + { + var gt = _suiteGroundTruth[i]; + var rs = _suiteReseekCapture[i]; + if (gt != null && rs != null && Math.Abs(rs.ReplayFrameNum - gt.ReplayFrameNum) <= 60) + matches++; + } + + var fields = BuildTestFields("T7"); + fields["matches_within_60_frames"] = matches; + fields["total_reseeks"] = 3; + fields["reseek_frames"] = new[] { _suiteReseekCapture[0]?.ReplayFrameNum ?? 0, _suiteReseekCapture[1]?.ReplayFrameNum ?? 0, _suiteReseekCapture[2]?.ReplayFrameNum ?? 0 }; + fields["gt_frames"] = new[] { _suiteGroundTruth[0]?.ReplayFrameNum ?? 0, _suiteGroundTruth[1]?.ReplayFrameNum ?? 0, _suiteGroundTruth[2]?.ReplayFrameNum ?? 0 }; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventIncidentReseek, + $"Incident re-seek: {matches}/3 within ±60 frames.", fields, "test", null); + + SuiteResult("T7").Status = "emitted"; + SuiteResult("T7").KpiLabel = "matches"; + SuiteResult("T7").KpiValue = matches + "/3"; + _suiteStep = SuiteInternalStep.T8_Trigger; + } + + // ── T8: FF Sweep (trigger existing replay index build) ──────────────── + + private void TickT8_Trigger() + { + if (TrySkipTest("T8", SuiteInternalStep.Done)) return; + if (_suiteFfSweepTriggered) { _suiteStep = SuiteInternalStep.T8_Poll; return; } + _suiteFfSweepTriggered = true; + _suiteT8PollTicks = 0; + _suiteT8BuildWasRunning = false; + + var (success, _, err) = DispatchReplayIncidentIndexBuild("start", _suiteTestRunId); + if (!success) + { + SuiteResult("T8").Status = "fail"; + SuiteResult("T8").Error = err ?? 
"trigger_failed"; + _suiteStep = SuiteInternalStep.TDISC_Seek; + return; + } + _suiteStep = SuiteInternalStep.T8_Poll; + } + + private void TickT8_Poll() + { + _suiteT8PollTicks++; + + ReplayIndexBuildPhase buildPhase; + lock (_replayIndexBuildLock) { buildPhase = _replayIndexBuildPhase; } + + if (buildPhase != ReplayIndexBuildPhase.Idle) { _suiteT8BuildWasRunning = true; return; } + + // Timeout at 60s (3600 ticks at 60Hz) + if (_suiteT8PollTicks > 3600) + { + SuiteResult("T8").Status = "fail"; + SuiteResult("T8").Error = "timeout"; + _suiteStep = SuiteInternalStep.TDISC_Seek; + return; + } + + // Haven't started yet + if (!_suiteT8BuildWasRunning && _suiteT8PollTicks < 30) return; + + // Build completed — cross-ref GT cars + int gtCarsInIndex = 0; + var indexRoot = _replayIndexDashboardCachedRoot; + if (indexRoot?.Incidents != null) + { + foreach (var gt in _suiteGroundTruth) + { + if (gt == null) continue; + if (indexRoot.Incidents.Exists(inc => inc.CarIdx == gt.CarIdx)) + gtCarsInIndex++; + } + } + + var fields = BuildTestFields("T8"); + fields["gt_cars_in_index"] = gtCarsInIndex; + fields["total_incidents_in_index"] = indexRoot?.Incidents?.Count ?? 
0; + fields["poll_ticks"] = _suiteT8PollTicks; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventFfSweepResult, + $"FF sweep: {gtCarsInIndex} GT cars in index.", fields, "test", null); + + SuiteResult("T8").Status = "emitted"; + SuiteResult("T8").KpiLabel = "gt_cars_in_index"; + SuiteResult("T8").KpiValue = gtCarsInIndex.ToString(); + _suiteStep = SuiteInternalStep.TDISC_Seek; + } + + // ── T_DISC: Data Point Discovery ───────────────────────────────────── + + private static readonly string[] DiscPositionNames = { "frame_zero", "mid_race", "at_incident", "end_of_replay" }; + + private void TickTDISC_Seek() + { + if (TrySkipTest("T_DISC", SuiteInternalStep.Done)) return; + + // Compute target frames on first entry + if (_suiteDiscTargetFrames == null) + { + int incidentFrame = _suiteGroundTruth?[0]?.ReplayFrameNum ?? (_replayFrameTotal * 3 / 4); + _suiteDiscTargetFrames = new[] + { + 0, + Math.Max(1, _replayFrameTotal / 2), + incidentFrame, + Math.Max(0, _replayFrameTotal - 10) + }; + } + + if (_suiteDiscPositionIdx >= _suiteDiscTargetFrames.Length) + { + // All positions captured + int captured = _suiteDiscPositionIdx; + SuiteResult("T_DISC").Status = "emitted"; + SuiteResult("T_DISC").KpiLabel = "positions_captured"; + SuiteResult("T_DISC").KpiValue = captured.ToString(); + _suiteStep = SuiteInternalStep.Done; + return; + } + + int target = _suiteDiscTargetFrames[_suiteDiscPositionIdx]; + try { _irsdk.ReplaySetPlayPosition(IRacingSdkEnum.RpyPosMode.Begin, target); } catch { } + _suiteDiscSettleTicks = 0; + _suiteStep = SuiteInternalStep.TDISC_Settle; + } + + private void TickTDISC_Settle() + { + _suiteDiscSettleTicks++; + int frame = SafeGetInt("ReplayFrameNum"); + int target = _suiteDiscTargetFrames[_suiteDiscPositionIdx]; + + if (Math.Abs(frame - target) <= DataCaptureSuiteConstants.T0_SeekSettleTolerance || _suiteDiscSettleTicks > 300) + { + _suiteStep = SuiteInternalStep.TDISC_Capture; + } + } 
+ + private void TickTDISC_Capture() + { + string posName = _suiteDiscPositionIdx < DiscPositionNames.Length + ? DiscPositionNames[_suiteDiscPositionIdx] : "unknown"; + int frame = SafeGetInt("ReplayFrameNum"); + + var fields = BuildTestFields("T_DISC"); + fields["position"] = posName; + fields["position_idx"] = _suiteDiscPositionIdx; + fields["frame"] = frame; + + // Read SessionState + int sessionState = 0; + try { sessionState = _irsdk.Data.GetInt("SessionState"); } catch { } + fields["session_state"] = sessionState; + + // Read Tier 1 + 2 variables: report populated counts for CarIdx arrays + fields["CarIdxTrackSurface_populated"] = CountPopulated("CarIdxTrackSurface"); + fields["CarIdxPosition_populated"] = CountPopulated("CarIdxPosition"); + fields["CarIdxLap_populated"] = CountPopulated("CarIdxLap"); + fields["CarIdxSessionFlags_populated"] = CountPopulated("CarIdxSessionFlags"); + fields["CarIdxOnPitRoad_populated"] = CountPopulatedBool("CarIdxOnPitRoad"); + fields["CarIdxTrackSurfaceMaterial_populated"] = CountPopulated("CarIdxTrackSurfaceMaterial"); + fields["CarIdxClassPosition_populated"] = CountPopulated("CarIdxClassPosition"); + + // Focused-car telemetry + float latAccel = 0f, lonAccel = 0f, yawRate = 0f; + try { latAccel = _irsdk.Data.GetFloat("LatAccel"); } catch { } + try { lonAccel = _irsdk.Data.GetFloat("LonAccel"); } catch { } + try { yawRate = _irsdk.Data.GetFloat("YawRate"); } catch { } + fields["LatAccel_available"] = latAccel != 0f; + fields["LonAccel_available"] = lonAccel != 0f; + fields["YawRate_available"] = yawRate != 0f; + + // YAML: ResultsPositions + fields["ResultsPositions_populated"] = CheckResultsPositionsPopulated(); + + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventDataDiscovery, + $"Data discovery at {posName} (frame={frame}, state={sessionState})", fields, "test", null); + + _suiteDiscPositionIdx++; + _suiteStep = SuiteInternalStep.TDISC_Seek; + } + + 
private int CountPopulated(string carIdxVar) + { + int count = 0; + for (int i = 0; i < ReplayIncidentIndexBuild.CarSlotCount; i++) + { + try { if (_irsdk.Data.GetInt(carIdxVar, i) != 0) count++; } catch { } + } + return count; + } + + private int CountPopulatedBool(string carIdxVar) + { + int count = 0; + for (int i = 0; i < ReplayIncidentIndexBuild.CarSlotCount; i++) + { + try { if (_irsdk.Data.GetBool(carIdxVar, i)) count++; } catch { } + } + return count; + } + + // ── Loki verification ───────────────────────────────────────────────── + + private void TransitionToLoki() + { + // Finalize 60Hz recorder + if (_suite60HzRecorder != null) + { + var stats = _suite60HzRecorder.Finish(); + var r60 = SuiteResult("T_60Hz"); + if (r60 != null) + { + r60.Status = "emitted"; + r60.KpiLabel = "ticks_recorded"; + r60.KpiValue = stats.ticksRecorded.ToString(); + } + var f60 = BuildTestFields("T_60Hz"); + f60["ticks_recorded"] = stats.ticksRecorded; + f60["file_size_bytes"] = stats.fileSizeBytes; + f60["duration_sec"] = stats.durationSec; + f60["file_path"] = _suite60HzRecorder.FilePath; + MergeSessionAndRoutingFields(f60); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.Event60HzSummary, + $"60Hz capture: {stats.ticksRecorded} ticks, {stats.fileSizeBytes / 1024}KB.", f60, "test", null); + _suite60HzRecorder.Dispose(); + _suite60HzRecorder = null; + } + + _suiteEmitCompleteUtc = DateTime.UtcNow; + _suitePhase = DataCaptureSuitePhase.AwaitingLoki; + + var fields = BuildTestFields("T_done"); + fields["loki_wait_ms"] = DataCaptureSuiteConstants.LokiVerifyDelayMs; + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", DataCaptureSuiteConstants.EventSuiteComplete, + "Suite complete — awaiting Loki ingestion.", fields, "test", null); + } + + private void TickAwaitingLoki() + { + if ((DateTime.UtcNow - _suiteEmitCompleteUtc).TotalMilliseconds < DataCaptureSuiteConstants.LokiVerifyDelayMs) + return; + 
RunLokiVerificationAsync(); + } + + private void RunLokiVerificationAsync() + { + if (string.IsNullOrEmpty(_lokiReadUrl)) + { + foreach (var r in _suiteResults) + if (r.Status == "emitted") r.Status = "pass"; + _suitePhase = DataCaptureSuitePhase.Complete; + return; + } + + long startNs = LokiQueryClient.NowMinusMs(3_600_000L); + long endNs = LokiQueryClient.NowNs(); + string user = Environment.GetEnvironmentVariable("SIMSTEWARD_LOKI_USER")?.Trim() ?? ""; + string pass = Environment.GetEnvironmentVariable("CURSOR_ELEVATED_GRAFANA_TOKEN")?.Trim() ?? ""; + string runId = _suiteTestRunId; + var results = _suiteResults; + + System.Threading.Tasks.Task.Run(async () => + { + try + { + foreach (var r in results) + { + if (r.Status != "emitted") continue; + var q = LokiQueryClient.BuildTestRunQuery(runId, r.EventName); + var lines = await LokiQueryClient.QueryLinesAsync(_lokiReadUrl, q, startNs, endNs, user, pass).ConfigureAwait(false); + r.LokiCount = lines.Count; + if (lines.Count == 0) + { + r.Status = "fail"; + r.Error = "not_found_in_loki"; + } + else + { + r.Status = "found"; + var (ok, failReason) = ValidateTestContent(r.TestId, lines); + r.Status = ok ? "pass" : "fail"; + if (!ok) r.Error = failReason; + } + } + } + catch { } + _suitePhase = DataCaptureSuitePhase.Complete; + }); + } + + /// + /// Two-stage content validation per test. Returns (pass, failReason). + /// Stage 1 (found) already confirmed count > 0 before this is called. + /// + private static (bool pass, string failReason) ValidateTestContent(string testId, List lines) + { + switch (testId) + { + case "T0": + return lines.Count >= 3 + ? (true, null) + : (false, $"expected>=3_got_{lines.Count}"); + case "T1": + return lines.Count >= 4 + ? (true, null) + : (false, $"expected>=4_speeds_got_{lines.Count}"); + case "T2": + return lines.Any(j => j["variable_count"] != null) + ? 
(true, null) + : (false, "missing_variable_count"); + case "T3": + return lines.Any(j => !string.IsNullOrEmpty(j["driver_name"]?.ToString())) + ? (true, null) + : (false, "missing_driver_name"); + case "T4": + { + bool ok = lines.Any(j => int.TryParse(j["driver_count"]?.ToString(), out int dc) && dc > 0); + return ok ? (true, null) : (false, "driver_count_zero_or_missing"); + } + case "T5": + return lines.Any(j => j["cam_group_num"] != null) + ? (true, null) + : (false, "missing_cam_group_num"); + case "T5b": + return lines.Any(j => j["camera_group_name"] != null) + ? (true, null) + : (false, "missing_camera_group_name"); + case "T6": + return (true, null); // existence is sufficient + case "T7": + return lines.Count >= 3 + ? (true, null) + : (false, $"expected>=3_reseeks_got_{lines.Count}"); + case "T8": + { + bool ok = lines.Any(j => int.TryParse(j["gt_cars_in_index"]?.ToString(), out int g) && g >= 1); + return ok ? (true, null) : (false, "gt_cars_in_index<1"); + } + case "T_DISC": + return lines.Count >= 4 + ? (true, null) + : (false, $"expected>=4_positions_got_{lines.Count}"); + case "T_60Hz": + { + bool ok = lines.Any(j => int.TryParse(j["ticks_recorded"]?.ToString(), out int t) && t > 0); + return ok ? (true, null) : (false, "ticks_recorded_zero"); + } + default: + return (true, null); + } + } + + // ── Helpers ─────────────────────────────────────────────────────────── + + private Dictionary BuildTestFields(string testTag) + { + return new Dictionary + { + ["test_run_id"] = _suiteTestRunId ?? "", + ["preflight_correlation_id"] = _suitePreflightCorrelationId ?? 
"", + ["test_tag"] = testTag, + ["domain"] = "test", + ["testing"] = "true", + }; + } + + private void EmitSuiteLifecycleEvent(string eventName, string message, string testTag) + { + var fields = BuildTestFields(testTag); + MergeSessionAndRoutingFields(fields); + _logger?.Structured("INFO", "simhub-plugin", eventName, message, fields, "test", null); + } + + private void ResolveDriverFromCarIdx(int carIdx, out string driverName, out string carNumber, out string custId) + { + driverName = ""; carNumber = ""; custId = ""; + try + { + if (!(_irsdk?.Data?.SessionInfo?.DriverInfo?.Drivers is IList list)) return; + foreach (var d in list) + { + if (d == null) continue; + var t = d.GetType(); + var idxObj = t.GetProperty("CarIdx")?.GetValue(d); + int idx = idxObj is int ci ? ci : Convert.ToInt32(idxObj ?? -1); + if (idx != carIdx) continue; + driverName = t.GetProperty("UserName")?.GetValue(d)?.ToString() ?? ""; + carNumber = t.GetProperty("CarNumber")?.GetValue(d)?.ToString() ?? ""; + var uid = t.GetProperty("UserID")?.GetValue(d) ?? t.GetProperty("CustID")?.GetValue(d); + custId = uid?.ToString() ?? 
""; + return; + } + } + catch { } + } + } +} +#endif diff --git a/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexBuild.cs b/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexBuild.cs index 49c772a..99f5c90 100644 --- a/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexBuild.cs +++ b/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexBuild.cs @@ -59,6 +59,15 @@ private void OnIrsdkTelemetryDataForReplayIndex() _logger.Warn("replay_incident_index telemetry: " + ex.Message); } + try + { + ProcessDataCaptureSuiteTick(); + } + catch (Exception ex) + { + _logger.Warn("data_capture_suite tick: " + ex.Message); + } + try { AppendReplayIncidentIndexRecordSampleIfEnabled(); diff --git a/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexDashboard.cs b/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexDashboard.cs index f61b791..f1c5ea4 100644 --- a/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexDashboard.cs +++ b/src/SimSteward.Plugin/SimStewardPlugin.ReplayIncidentIndexDashboard.cs @@ -201,6 +201,56 @@ private void AppendReplayIncidentIndexRecordSampleIfEnabled() } } + /// + /// Starts 60 Hz record mode for the given reason tag. + /// No-op if already recording, not connected, or not in replay mode. + /// + private void StartReplayIncidentIndexRecordModeLocked(string reason) + { + if (_irsdk == null || !_irsdk.IsConnected) return; + string simMode = _irsdk.Data?.SessionInfo?.WeekendInfo?.SimMode ?? ""; + if (!string.Equals(simMode, "replay", StringComparison.OrdinalIgnoreCase)) return; + int sub = _irsdk.Data?.SessionInfo?.WeekendInfo?.SubSessionID ?? 
0; + if (sub <= 0) return; + + lock (_replayIndexRecordWriterLock) + { + if (_replayIndexRecordWriter != null) return; // already recording + + string dir = ReplayIncidentIndexOutputPaths.GetRecordSamplesDirectory(); + Directory.CreateDirectory(dir); + string name = sub + "-" + DateTime.UtcNow.ToString("yyyyMMddTHHmmssfffZ", CultureInfo.InvariantCulture) + ".ndjson"; + string path = Path.Combine(dir, name); + try + { + _replayIndexRecordWriter = new StreamWriter( + new FileStream(path, FileMode.CreateNew, FileAccess.Write, FileShare.Read), + new UTF8Encoding(false)) { AutoFlush = true }; + _replayIndexRecordActivePath = path; + _replayIndexRecordLastPath = path; + } + catch + { + return; + } + } + + _replayIndexRecordTicksForStructuredWindow = 0; + _replayIndexRecordModeEnabled = true; + + if (_logger != null) + { + var fields = new Dictionary + { + ["reason"] = reason, + ["record_file"] = _replayIndexRecordActivePath ?? "" + }; + MergeSessionAndRoutingFields(fields); + _logger.Structured("INFO", "simhub-plugin", "replay_incident_index_record_started", + "Replay incident index record mode started.", fields, "lifecycle", null); + } + } + private void StopReplayIncidentIndexRecordModeLocked(string reason) { bool hadWriter; diff --git a/src/SimSteward.Plugin/SimStewardPlugin.cs b/src/SimSteward.Plugin/SimStewardPlugin.cs index 814d515..220be6a 100644 --- a/src/SimSteward.Plugin/SimStewardPlugin.cs +++ b/src/SimSteward.Plugin/SimStewardPlugin.cs @@ -7,14 +7,18 @@ using System; using System.Collections; +using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; +using System.Text; using System.Reflection; +using System.Net; using System.Net.Http; using System.Net.NetworkInformation; using System.Threading.Tasks; using Newtonsoft.Json; +using Sentry; namespace SimSteward.Plugin { @@ -40,6 +44,7 @@ public partial class SimStewardPlugin #endif private PluginLogger _logger; + private IDisposable _sentryDisposable; private bool 
_debugMode; private DashboardBridge _bridge; private DateTime _lastBroadcastAt = DateTime.MinValue; @@ -57,16 +62,20 @@ public partial class SimStewardPlugin private volatile bool _simHubHttpListening; private volatile string _dashboardPingStatus = "—"; private DateTime _lastDashboardPingUtc = DateTime.MinValue; + private DateTime _lastAgentHttpDebugUtc = DateTime.MinValue; private int _dataUpdateTick; private SystemMetricsSampler _resourceSampler; private DateTime _nextResourceSampleUtc = DateTime.MaxValue; private int _resourceSampleIntervalSec = 60; private SystemMetricsSample _lastResourceSample; + private PluginMetricsTelemetry _metricsTelemetry; #if SIMHUB_SDK private IRacingSdk _irsdk; private double _lastSessionTime; private string _pluginMode = "Unknown"; + private string _lokiBaseUrl = ""; + private string _grafanaBaseUrl = ""; private int _replayFrameNumEnd; /// Replay length (telemetry ReplayFrameNumEnd); 0 if unknown. private int _replayFrameTotal; @@ -92,6 +101,7 @@ private string BuildStateJson(PluginSnapshot snapshot) var state = new { type = "state", + pluginVersion = snapshot.PluginVersion, pluginMode = snapshot.PluginMode, currentSessionTime = snapshot.CurrentSessionTime, currentSessionTimeFormatted = snapshot.CurrentSessionTimeFormatted, @@ -104,7 +114,9 @@ private string BuildStateJson(PluginSnapshot snapshot) drivers = BuildDriverList(), cameraGroups = GetCameraGroupNames(), diagnostics = snapshot.Diagnostics, - replayIncidentIndex = snapshot.ReplayIncidentIndex + replayIncidentIndex = snapshot.ReplayIncidentIndex, + dataCaptureSuite = snapshot.DataCaptureSuite, + preflight = snapshot.Preflight }; return JsonConvert.SerializeObject(state); } @@ -114,6 +126,7 @@ private string BuildStateJson() var state = new { type = "state", + pluginVersion = PluginVersionInfo.Display, pluginMode = "Unknown", currentSessionTime = 0.0, currentSessionTimeFormatted = "0:00", @@ -248,6 +261,7 @@ private PluginSnapshot BuildPluginSnapshot() return new 
PluginSnapshot { + PluginVersion = PluginVersionInfo.Display, PluginMode = _pluginMode, CurrentSessionTime = _lastSessionTime, CurrentSessionTimeFormatted = FormatSessionTime(_lastSessionTime), @@ -258,7 +272,9 @@ private PluginSnapshot BuildPluginSnapshot() ReplaySessionNum = replaySessionNum, ReplaySessionName = replaySessionName, Diagnostics = BuildDiagnostics(clientCount), - ReplayIncidentIndex = BuildReplayIncidentIndexDashboardSnapshot() + ReplayIncidentIndex = BuildReplayIncidentIndexDashboardSnapshot(), + DataCaptureSuite = BuildDataCaptureSuiteSnapshot(), + Preflight = _preflightSnapshot }; } @@ -273,7 +289,9 @@ private PluginDiagnostics BuildDiagnostics(int clientCount) WsClients = clientCount, SteamRunning = _steamRunning, SimHubHttpListening = _simHubHttpListening, - DashboardPing = _dashboardPingStatus + DashboardPing = _dashboardPingStatus, + GrafanaConfigured = !string.IsNullOrEmpty(_lokiBaseUrl), + ReplaySessionCompleted = IsReplaySessionCompleted() }; if (_lastResourceSample != null) { @@ -331,6 +349,78 @@ private static string ResolveSessionNameFromYaml(IRacingSdkSessionInfo sessionIn return "—"; } + /// + /// Checks YAML Sessions[].SessionState for any Race session that reached Checkered/CoolDown. + /// Uses reflection (same pattern as ). + /// + private bool IsReplaySessionCompleted() + { + try + { + var sessionInfo = _irsdk?.Data?.SessionInfo; + if (!(sessionInfo?.SessionInfo?.Sessions is IList list)) return false; + foreach (var o in list) + { + if (o == null) continue; + var t = o.GetType(); + var typeProp = t.GetProperty("SessionType"); + var stateProp = t.GetProperty("ResultsOfficial"); + // Check if it's a Race session + var sessionType = typeProp?.GetValue(o)?.ToString() ?? 
""; + if (!string.Equals(sessionType, "Race", StringComparison.OrdinalIgnoreCase)) continue; + // ResultsOfficial = 1 means session completed with official results + var official = stateProp?.GetValue(o); + if (official is int ival && ival >= 1) return true; + if (int.TryParse(official?.ToString(), out int parsed) && parsed >= 1) return true; + } + } + catch { } + return false; + } + + private PreflightSessionInfo[] ReadSessionListFromYaml() + { + try + { + var sessionInfo = _irsdk?.Data?.SessionInfo; + if (!(sessionInfo?.SessionInfo?.Sessions is IList list) || list.Count == 0) + return null; + + var result = new List(); + foreach (var o in list) + { + if (o == null) continue; + var t = o.GetType(); + var numProp = t.GetProperty("SessionNum"); + var nameProp = t.GetProperty("SessionName"); + var typeProp = t.GetProperty("SessionType"); + var officialProp = t.GetProperty("ResultsOfficial"); + + int num = 0; + if (numProp?.GetValue(o) is int n) num = n; + else int.TryParse(numProp?.GetValue(o)?.ToString(), out num); + + bool official = false; + var offVal = officialProp?.GetValue(o); + if (offVal is int oi) official = oi >= 1; + else if (int.TryParse(offVal?.ToString(), out int op)) official = op >= 1; + + result.Add(new PreflightSessionInfo + { + SessionNum = num, + SessionName = nameProp?.GetValue(o)?.ToString() ?? "", + SessionType = typeProp?.GetValue(o)?.ToString() ?? "", + ResultsOfficial = official, + }); + } + return result.Count > 0 ? result.ToArray() : null; + } + catch + { + return null; + } + } + private object[] BuildDriverList() { try @@ -791,6 +881,51 @@ private System.Collections.Generic.Dictionary BuildCaptureIncide } } + if (string.Equals(action, "data_capture_suite", StringComparison.OrdinalIgnoreCase)) + { + var raw = (arg ?? "").Trim(); + var colonIdx = raw.IndexOf(':'); + var verb = (colonIdx >= 0 ? raw.Substring(0, colonIdx) : raw).ToLowerInvariant(); + var skipParam = colonIdx >= 0 ? 
raw.Substring(colonIdx + 1) : ""; + var skipIds = string.IsNullOrEmpty(skipParam) + ? Array.Empty() + : skipParam.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); + switch (verb) + { + case "start": + TryStartDataCaptureSuite(skipIds); + LogActionResult(action, arg, correlationId, true, ""); + return (true, "ok", null); + case "preflight": + _preflightRequested = true; + LogActionResult(action, arg, correlationId, true, ""); + return (true, "ok", null); + case "preflight_scope": + _preflightReplayScope = string.Equals(skipParam, "partial", StringComparison.OrdinalIgnoreCase) ? "partial" : "full"; + _preflightSnapshot.ReplayScope = _preflightReplayScope; + LogActionResult(action, arg, correlationId, true, ""); + return (true, "ok", null); + case "preflight_reset": + _preflightLevel = 0; + _preflightCorrelationId = null; + _preflightSnapshot = new PreflightSnapshot(); + _preflightStep = PreflightStep.Idle; + LogActionResult(action, arg, correlationId, true, ""); + return (true, "ok", null); + case "cancel": + _suiteCancelRequested = true; + LogActionResult(action, arg, correlationId, true, ""); + return (true, "ok", null); + case "verify": + _suiteEmitCompleteUtc = DateTime.MinValue; // force re-verify on next tick + LogActionResult(action, arg, correlationId, true, ""); + return (true, "ok", null); + default: + LogActionResult(action, arg, correlationId, false, "bad_arg"); + return (false, null, "bad_arg"); + } + } + if (string.Equals(action, "replay_incident_index_seek", StringComparison.OrdinalIgnoreCase)) { return DispatchReplayIncidentIndexSeek(arg, correlationId); @@ -855,9 +990,10 @@ private void RefreshDependencyChecks() _steamRunning = false; } + IPEndPoint[] listeners = Array.Empty(); try { - var listeners = IPGlobalProperties.GetIPGlobalProperties().GetActiveTcpListeners(); + listeners = IPGlobalProperties.GetIPGlobalProperties().GetActiveTcpListeners(); _simHubHttpListening = listeners.Any(e => e.Port == 8888); } catch @@ -870,25 +1006,86 @@ 
private void RefreshDependencyChecks() return; _lastDashboardPingUtc = now; + // #region agent log + if ((now - _lastAgentHttpDebugUtc).TotalSeconds >= DashboardPingIntervalSec) + { + _lastAgentHttpDebugUtc = now; + var ep8888 = listeners.Where(e => e.Port == 8888).Select(e => e.Address.ToString() + ":" + e.Port).ToArray(); + WriteAgentHttpDebug("H1,H2,H4", "simhub_http_listeners_8888", new Dictionary + { + ["count"] = ep8888.Length, + ["endpoints"] = string.Join(";", ep8888), + ["simHubHttpListening"] = _simHubHttpListening, + ["hint_localhost_ipv6"] = "If browser uses localhost and refuses, try http://127.0.0.1:8888/... (H2)" + }); + } + // #endregion + Task.Run(() => { + const string pingUrl = "http://127.0.0.1:8888/Web/sim-steward-dash/index.html"; try { var response = DashboardPingClient - .GetAsync("http://127.0.0.1:8888/Web/sim-steward-dash/index.html") + .GetAsync(pingUrl) .ConfigureAwait(false) .GetAwaiter() .GetResult(); _dashboardPingStatus = response.IsSuccessStatusCode ? $"OK ({(int)response.StatusCode})" : $"HTTP {(int)response.StatusCode}"; + // #region agent log + WriteAgentHttpDebug("H3", "dashboard_ping_ok", new Dictionary + { + ["url"] = pingUrl, + ["statusCode"] = (int)response.StatusCode, + ["success"] = response.IsSuccessStatusCode + }); + // #endregion } catch (Exception ex) { _dashboardPingStatus = "Error: " + ex.Message; + // #region agent log + WriteAgentHttpDebug("H1,H3,H5", "dashboard_ping_error", new Dictionary + { + ["url"] = pingUrl, + ["error"] = ex.GetType().Name, + ["message"] = ex.Message + }); + // #endregion } }); } + + // #region agent log + private void WriteAgentHttpDebug(string hypothesisId, string message, Dictionary data) + { + try + { + var baseDir = !string.IsNullOrEmpty(_pluginDataPath) + ? 
_pluginDataPath + : Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData), "SimHubWpf", "PluginsData", "SimSteward"); + Directory.CreateDirectory(baseDir); + var envPath = Environment.GetEnvironmentVariable("SIMSTEWARD_DEBUG_LOG_PATH"); + var path = !string.IsNullOrWhiteSpace(envPath) ? envPath.Trim() : Path.Combine(baseDir, "debug-959be8.log"); + var payload = new Dictionary + { + ["sessionId"] = "959be8", + ["hypothesisId"] = hypothesisId, + ["location"] = "SimStewardPlugin.cs:RefreshDependencyChecks", + ["message"] = message, + ["timestamp"] = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(), + ["data"] = data + }; + File.AppendAllText(path, JsonConvert.SerializeObject(payload) + "\n", Encoding.UTF8); + } + catch + { + // ignore + } + } + // #endregion #endif #if SIMHUB_SDK @@ -900,16 +1097,48 @@ public void Init(PluginManager pluginManager) "SimHubWpf", "PluginsData", "SimSteward"); _debugMode = Environment.GetEnvironmentVariable("SIMSTEWARD_LOG_DEBUG") == "1"; + _lokiBaseUrl = Environment.GetEnvironmentVariable("SIMSTEWARD_LOKI_URL")?.Trim() ?? ""; + _grafanaBaseUrl = Environment.GetEnvironmentVariable("SIMSTEWARD_GRAFANA_URL")?.Trim() ?? "http://localhost:3000"; + + var sentryDsn = Environment.GetEnvironmentVariable("SIMSTEWARD_SENTRY_DSN")?.Trim(); + if (string.IsNullOrWhiteSpace(sentryDsn)) + sentryDsn = "https://ab2d0a6f7cd97033a46f4fa7d90dabab@o4511097126780928.ingest.us.sentry.io/4511102961319936"; + _logger = new PluginLogger(_pluginDataPath, isDebugMode: _debugMode); _logger.SetSpineProvider(() => (_currentSessionId ?? "", _currentSessionSeq ?? 
"", _replayFrameNumEnd)); _logger.WriteError += OnLogWriteError; _logger.Structured("INFO", "simhub-plugin", "logging_ready", "Logging pipeline ready; init continuing.", null, "lifecycle", null); + try + { + _sentryDisposable = SentrySdk.Init(o => + { + o.Dsn = sentryDsn; + o.Environment = "local"; + o.Release = PluginVersionInfo.Display; + o.TracesSampleRate = 1.0; + o.IsGlobalModeEnabled = true; + o.SetBeforeSend((sentryEvent, hint) => + { + sentryEvent.SetTag("plugin_mode", _pluginMode ?? "Unknown"); + sentryEvent.SetTag("iracing_connected", (_irsdk?.IsConnected ?? false).ToString().ToLowerInvariant()); + return sentryEvent; + }); + }); + } + catch (Exception ex) + { + _logger.Structured("WARN", "simhub-plugin", "sentry_init_failed", + $"Sentry SDK init failed: {ex.Message}", + new System.Collections.Generic.Dictionary { ["error"] = ex.Message }, "lifecycle", null); + } + var structuredPath = _logger.StructuredLogPath; _logger.Structured("INFO", "simhub-plugin", "file_tail_ready", "Structured log file ready for Loki ingestion.", new System.Collections.Generic.Dictionary { ["path"] = structuredPath ?? 
"(none)" }, "lifecycle", null); _logger.Structured("INFO", "simhub-plugin", "plugin_started", "SimSteward plugin starting.", null, "lifecycle", null); + SentrySdk.AddBreadcrumb("Plugin started", "lifecycle"); pluginManager.AddProperty("SimSteward.PluginMode", GetType(), "Unknown"); pluginManager.AddProperty("SimSteward.IncidentCount", GetType(), 0); @@ -959,9 +1188,12 @@ public void Init(PluginManager pluginManager) _bridge.Start(wsBind, wsPort, wsToken); _logger.Structured("INFO", "simhub-plugin", "plugin_ready", "SimSteward ready.", new System.Collections.Generic.Dictionary { ["ws_port"] = _wsPort }, "lifecycle", null); + SentrySdk.AddBreadcrumb("WebSocket bridge started", "lifecycle", + data: new Dictionary { ["port"] = _wsPort.ToString() }); } catch (Exception ex) { + SentrySdk.CaptureException(ex); _logger.Structured("WARN", "simhub-plugin", "bridge_start_failed", $"WebSocket server could not start: {ex.Message}", new System.Collections.Generic.Dictionary { ["bind"] = wsBind, ["port"] = _wsPort, ["error"] = ex.Message }, "lifecycle", null); _logger.Error($"WebSocket server could not start on {wsBind}:{wsPort}. 
Is it already in use?", ex); @@ -977,6 +1209,7 @@ public void Init(PluginManager pluginManager) _irsdk.UpdateInterval = 1; _irsdk.OnConnected += () => { + SentrySdk.AddBreadcrumb("iRacing connected", "lifecycle"); _logger?.Structured("INFO", "simhub-plugin", "iracing_connected", "iRacing connected.", null, "lifecycle", null); if (_irsdk != null && _logger != null) { @@ -987,6 +1220,7 @@ public void Init(PluginManager pluginManager) }; _irsdk.OnDisconnected += () => { + SentrySdk.AddBreadcrumb("iRacing disconnected", "lifecycle"); ReplayIncidentIndexOnIracingDisconnected(); _replayIncidentIndexPrereqLogKey = ""; _logger?.Structured("INFO", "simhub-plugin", "iracing_disconnected", "iRacing disconnected.", null, "lifecycle", null); @@ -998,6 +1232,7 @@ public void Init(PluginManager pluginManager) } catch (Exception ex) { + SentrySdk.CaptureException(ex); _logger.Error("iRacing SDK (IRSDKSharper) failed to start. Plugin will run without iRacing data.", ex); _irsdk = null; } @@ -1016,6 +1251,21 @@ public void Init(PluginManager pluginManager) _resourceSampler = new SystemMetricsSampler(); _nextResourceSampleUtc = DateTime.UtcNow.AddSeconds(_resourceSampleIntervalSec); + try + { + _metricsTelemetry = PluginMetricsTelemetry.TryCreate(_logger, () => _lastResourceSample); + } + catch (Exception ex) + { + SentrySdk.CaptureException(ex); + _logger?.Structured("WARN", "simhub-plugin", "otel_metrics_load_failed", + $"OTLP metrics disabled — assembly load failure: {ex.Message}", + new System.Collections.Generic.Dictionary + { + ["exception_type"] = ex.GetType().Name, + }, "lifecycle", null); + } + RefreshDependencyChecks(); } @@ -1184,6 +1434,19 @@ public void End(PluginManager pluginManager) { _logger?.Structured("INFO", "simhub-plugin", "plugin_stopped", "SimSteward plugin End.", null, "lifecycle", null); + try { _sentryDisposable?.Dispose(); } catch { } + _sentryDisposable = null; + + try + { + _metricsTelemetry?.Dispose(); + } + catch + { + // ignore + } + _metricsTelemetry 
= null; + StopReplayIncidentIndexRecordModeLocked("plugin_end"); if (_logger != null) diff --git a/tests/DeployLokiEventsTest.ps1 b/tests/DeployLokiEventsTest.ps1 new file mode 100644 index 0000000..6d7da70 --- /dev/null +++ b/tests/DeployLokiEventsTest.ps1 @@ -0,0 +1,187 @@ +# Post-deploy test: verify all deploy events reached Loki with expected values +# Requires: local Loki running on localhost:3100, deploy.ps1 just completed +# Run: .\tests\DeployLokiEventsTest.ps1 + +$ErrorActionPreference = "Stop" + +$repoRoot = (Resolve-Path (Join-Path $PSScriptRoot '..')).Path +$loadDotenv = Join-Path $repoRoot 'scripts\load-dotenv.ps1' +if (Test-Path $loadDotenv) { + . $loadDotenv + Import-DotEnv (Resolve-SimStewardEnvPaths -RepoRoot $repoRoot) +} + +$lokiUrl = $env:SIMSTEWARD_LOKI_URL +if ([string]::IsNullOrWhiteSpace($lokiUrl)) { $lokiUrl = "http://localhost:3100" } +Write-Host "Loki: $lokiUrl" + +$passed = 0 +$failed = 0 + +function Query-LokiEvent { + param([string]$EventName, [int]$LookbackMinutes = 30) + $startNs = ([DateTimeOffset]::UtcNow.AddMinutes(-$LookbackMinutes).ToUnixTimeMilliseconds()) * 1000000 + $endNs = ([DateTimeOffset]::UtcNow.ToUnixTimeMilliseconds()) * 1000000 + $query = '{component="local-deployment"} |= "' + $EventName + '"' + $base = "$($lokiUrl.TrimEnd('/'))/loki/api/v1/query_range" + + # Build URL with query string manually to avoid PowerShell encoding issues + $qs = "query=$([Uri]::EscapeDataString($query))&start=$startNs&end=$endNs&limit=10&direction=BACKWARD" + $uri = "$base`?$qs" + + $headers = @{} + $lokiUser = $env:SIMSTEWARD_LOKI_USER + $lokiPass = $env:SIMSTEWARD_LOKI_TOKEN + if (-not [string]::IsNullOrWhiteSpace($lokiUser) -and -not [string]::IsNullOrWhiteSpace($lokiPass) -and $lokiUrl -match 'grafana\.net') { + $pair = [Text.Encoding]::ASCII.GetBytes(("{0}:{1}" -f $lokiUser.Trim(), $lokiPass.Trim())) + $headers['Authorization'] = 'Basic ' + [Convert]::ToBase64String($pair) + } + + try { + $raw = Invoke-WebRequest -Uri $uri -Method Get 
-Headers $headers -TimeoutSec 10 -UseBasicParsing + $resp = $raw.Content | ConvertFrom-Json + } catch { + Write-Host " DEBUG: Loki query failed for $EventName : $($_.Exception.Message)" + Write-Host " DEBUG: URI = $uri" + return @() + } + + $entries = @() + if ($null -eq $resp.data -or $null -eq $resp.data.result) { return $entries } + foreach ($stream in $resp.data.result) { + if ($null -eq $stream.values) { continue } + foreach ($val in $stream.values) { + $line = $val[1] + if ($null -ne $line) { + $entries += ($line | ConvertFrom-Json) + } + } + } + return $entries +} + +function Assert-Event { + param( + [string]$EventName, + [hashtable]$ExpectedFields = @{} + ) + + $entries = Query-LokiEvent $EventName + if ($entries.Count -eq 0) { + Write-Host "FAIL: [$EventName] not found in Loki" + $script:failed++ + return + } + Write-Host "PASS: [$EventName] found in Loki ($($entries.Count) entry/entries)" + $script:passed++ + + $entry = $entries[0] + foreach ($key in $ExpectedFields.Keys) { + $expected = $ExpectedFields[$key] + $actual = $entry.$key + if ($expected -is [scriptblock]) { + $ok = & $expected $actual + if ($ok) { + Write-Host "PASS: [$EventName] $key validated" + $script:passed++ + } else { + Write-Host "FAIL: [$EventName] $key validation failed (actual=$actual)" + $script:failed++ + } + } else { + if ("$actual" -eq "$expected") { + Write-Host "PASS: [$EventName] $key=$actual" + $script:passed++ + } else { + Write-Host "FAIL: [$EventName] $key expected=$expected actual=$actual" + $script:failed++ + } + } + } +} + +# ── Verify every deploy event ──────────────────────────────────────────────── + +Assert-Event 'deploy_started' @{ + level = 'INFO' + machine = { param($v) -not [string]::IsNullOrWhiteSpace($v) } + git_branch = { param($v) -not [string]::IsNullOrWhiteSpace($v) } + git_sha = { param($v) -not [string]::IsNullOrWhiteSpace($v) } +} + +Assert-Event 'deploy_simhub_found' @{ + level = 'INFO' + simhub_path = { param($v) -not 
[string]::IsNullOrWhiteSpace($v) } +} + +Assert-Event 'deploy_build_started' @{ + level = 'INFO' +} + +Assert-Event 'deploy_build_result' @{ + level = 'INFO' + status = 'ok' + duration_s = { param($v) $null -ne $v -and [double]$v -ge 0 } +} + +Assert-Event 'deploy_tests_result' @{ + level = 'INFO' + status = { param($v) $v -in @('ok', 'skipped') } +} + +Assert-Event 'deploy_simhub_stopped' @{ + level = 'INFO' +} + +Assert-Event 'deploy_dlls_cleaned' @{ + level = 'INFO' + deleted = { param($v) -not [string]::IsNullOrWhiteSpace($v) } +} + +Assert-Event 'deploy_dlls_copied' @{ + level = 'INFO' + dlls = { param($v) $v -match 'SimSteward\.Plugin\.dll' } +} + +Assert-Event 'deploy_dashboard_copied' @{ + level = 'INFO' + dashboards = { param($v) $v -match 'index\.html' } +} + +Assert-Event 'deploy_verified' @{ + level = 'INFO' + status = 'ok' +} + +Assert-Event 'deploy_version_resolved' @{ + plugin_version = { param($v) -not [string]::IsNullOrWhiteSpace($v) -and $v -ne 'unknown' } +} + +Assert-Event 'deploy_simhub_launch' @{ + level = 'INFO' + status = { param($v) $v -in @('launched', 'skipped') } +} + +Assert-Event 'deploy_post_tests_started' @{ + level = 'INFO' +} + +Assert-Event 'deploy_port_probe' @{ + port = '8888' +} + +Assert-Event 'deploy_completed' @{ + level = { param($v) $v -in @('INFO', 'WARN') } + status = { param($v) $v -in @('ok', 'completed_with_warnings') } + plugin_version = { param($v) -not [string]::IsNullOrWhiteSpace($v) } + duration_s = { param($v) $null -ne $v -and [double]$v -gt 0 } +} + +# ── Summary ────────────────────────────────────────────────────────────────── +Write-Host "" +Write-Host "Loki deploy events: $passed passed, $failed failed" +if ($failed -gt 0) { + Write-Host "FAIL: $failed assertion(s) failed" + exit 1 +} +Write-Host "All deploy Loki event checks passed." 
diff --git a/tests/ReplayWorkflowTest.ps1 b/tests/ReplayWorkflowTest.ps1 index 56e1804..cfc5f38 100644 --- a/tests/ReplayWorkflowTest.ps1 +++ b/tests/ReplayWorkflowTest.ps1 @@ -1,4 +1,4 @@ -# Replay capture workflow test — WebSocket state shape +# Replay capture workflow test - WebSocket state shape # Requires: SimHub running with Sim Steward plugin loaded # Run: .\tests\ReplayWorkflowTest.ps1 @@ -8,7 +8,7 @@ $timeoutMs = 8000 Add-Type -AssemblyName System.Core -# --- 1. WebSocket connect and receive state (Test case 1 — Detect) --- +# --- 1. WebSocket connect and receive state (Test case 1 - Detect) --- $uri = [System.Uri]::new("ws://127.0.0.1:$port/") $ws = New-Object System.Net.WebSockets.ClientWebSocket $cts = New-Object System.Threading.CancellationTokenSource @@ -59,6 +59,16 @@ try { } Write-Host "PASS: State has pluginMode=$mode" + if (-not $stateObj.PSObject.Properties["pluginVersion"]) { + Write-Host "FAIL: State missing pluginVersion" + exit 1 + } + if ([string]::IsNullOrWhiteSpace([string]$stateObj.pluginVersion)) { + Write-Host "FAIL: pluginVersion empty" + exit 1 + } + Write-Host "PASS: State has pluginVersion=$($stateObj.pluginVersion)" + # Expect: diagnostics present (WebSocket state mirrors PluginSnapshot.Diagnostics) if (-not $stateObj.PSObject.Properties["diagnostics"]) { Write-Host "FAIL: State missing diagnostics" @@ -77,7 +87,7 @@ try { Write-Host "PASS: State lap absent (ok for older clients)" } - # When sessions array present, expect structure (Test case 2 — Sessions list) + # When sessions array present, expect structure (Test case 2 - Sessions list) if ($diag.PSObject.Properties["sessions"] -and $null -ne $diag.sessions) { $sessions = $diag.sessions if ($sessions -isnot [Array]) { diff --git a/tests/WebSocketConnectTest.ps1 b/tests/WebSocketConnectTest.ps1 index 6452bde..726d9b3 100644 --- a/tests/WebSocketConnectTest.ps1 +++ b/tests/WebSocketConnectTest.ps1 @@ -60,6 +60,15 @@ try { $obj = $json | ConvertFrom-Json if ($obj.type -eq "state") 
{ Write-Host "PASS: Received state message (pluginMode=$($obj.pluginMode))" + if (-not $obj.PSObject.Properties["pluginVersion"]) { + Write-Host "FAIL: State missing pluginVersion" + exit 1 + } + if ([string]::IsNullOrWhiteSpace([string]$obj.pluginVersion)) { + Write-Host "FAIL: pluginVersion empty" + exit 1 + } + Write-Host "PASS: pluginVersion=$($obj.pluginVersion)" if ($null -ne $obj.incidents) { Write-Host "PASS: State contains incidents array (count=$($obj.incidents.Count))" } diff --git a/tests/contextstream-mcp-test-prompt.md b/tests/contextstream-mcp-test-prompt.md new file mode 100644 index 0000000..bea0777 --- /dev/null +++ b/tests/contextstream-mcp-test-prompt.md @@ -0,0 +1,616 @@ +# ContextStream MCP Server — Comprehensive Test Suite + +> **Usage:** Paste this entire prompt into a new Claude Code session with ContextStream MCP connected. +> Claude will execute all test cases, report pass/fail, and leave artifacts tagged `[CS-TEST]` for UI inspection. + +--- + +You are a QA engineer testing the ContextStream MCP server. Execute every test case below **in order**. For each test: + +1. Run the MCP tool call exactly as described +2. Record the result: **PASS** (expected behavior) or **FAIL** (error, missing data, wrong response) +3. If a test returns an ID, save it — later tests may reference it +4. **Do NOT clean up** test artifacts — leave them for manual UI inspection +5. All created artifacts MUST use the `[CS-TEST]` prefix in their title for easy identification and later cleanup + +After all suites complete, print a summary table with columns: `Suite | Test ID | Description | Result | Notes`. + +**Target workspace:** sim-steward / simhub-plugin +**Folder path:** `C:\Users\winth\dev\sim-steward\simhub-plugin` + +--- + +## Suite 1: Init & Context (3 tests) + +**T1.1 — Init session** +Call `init(folder_path="C:\\Users\\winth\\dev\\sim-steward\\simhub-plugin", client_name="claude")`. 
+- PASS if: response contains workspace name "sim-steward" and project "simhub-plugin" +- FAIL if: error, no workspace resolved, or wrong workspace + +**T1.2 — Context (fast mode)** +Call `context(user_message="ContextStream MCP test suite running", mode="fast")`. +- PASS if: returns without error +- FAIL if: error or timeout + +**T1.3 — Context (standard mode)** +Call `context(user_message="Testing context retrieval with lessons and preferences", mode="standard")`. +- PASS if: response includes any of: lessons, preferences, or contextstream rules +- FAIL if: empty response or error + +--- + +## Suite 2: Search (7 tests) + +**T2.1 — Keyword search** +Call `search(mode="keyword", query="DispatchAction", limit=3)`. +- PASS if: returns results with file paths ending in `.cs` +- FAIL if: 0 results or error + +**T2.2 — Pattern search (glob)** +Call `search(mode="pattern", query="*.csproj", limit=5)`. +- PASS if: returns file paths ending in `.csproj` +- FAIL if: 0 results or error + +**T2.3 — Semantic search** +Call `search(mode="semantic", query="incident detection and replay logic", limit=3)`. +- PASS if: returns results related to incident/replay code +- FAIL if: 0 results or error + +**T2.4 — Auto mode** +Call `search(mode="auto", query="WebSocket server binding", limit=3)`. +- PASS if: returns results (mode auto-selected) with file paths +- FAIL if: 0 results or error + +**T2.5 — Exhaustive search** +Call `search(mode="exhaustive", query="TODO", limit=10)`. +- PASS if: returns multiple results across different files +- FAIL if: 0 results or error + +**T2.6 — Search result file validation** +Take the first file path returned from T2.1. Use the `Read` tool to read that file at the returned line number. +- PASS if: file exists and contains the search term near the reported line +- FAIL if: file doesn't exist, or content doesn't match + +**T2.7 — Search with include_content** +Call `search(mode="keyword", query="class SimStewardPlugin", include_content=true, limit=1)`. 
+- PASS if: result includes actual code content (not just file paths) +- FAIL if: no content in response + +--- + +## Suite 3: Memory — Nodes (7 tests) ⚠️ KNOWN PAIN POINT + +> **Context:** Past sessions observed node IDs returned by `search` that 404'd on `get_node`. This suite specifically tests create→get round-trip integrity. + +**T3.1 — Create node (fact)** +Call `memory(action="create_node", node_type="fact", title="[CS-TEST] Round-trip test node", content="This is a test fact created by the ContextStream MCP test suite. Timestamp: {current ISO time}")`. +- PASS if: returns a node ID (UUID format) +- FAIL if: error +- **Save the returned node_id as `TEST_NODE_ID`** + +**T3.2 — Get node (immediate round-trip)** +Call `memory(action="get_node", node_id=TEST_NODE_ID)`. +- PASS if: returns the node with matching title "[CS-TEST] Round-trip test node" and content containing "test fact" +- FAIL if: 404 error or content mismatch — **THIS IS THE CRITICAL REGRESSION TEST** + +**T3.3 — Update node** +Call `memory(action="update_node", node_id=TEST_NODE_ID, content="Updated content: round-trip verification successful. Updated at: {current ISO time}")`. +- PASS if: update acknowledged without error +- FAIL if: error + +**T3.4 — Get node (verify update persisted)** +Call `memory(action="get_node", node_id=TEST_NODE_ID)`. +- PASS if: content now contains "Updated content: round-trip verification successful" +- FAIL if: still shows old content or 404 + +**T3.5 — List nodes (verify appears)** +Call `memory(action="list_nodes", node_type="fact")`. +- PASS if: test node with "[CS-TEST]" title appears in list +- FAIL if: not found in list despite successful creation + +**T3.6 — Supersede node** +Call `memory(action="supersede_node", node_id=TEST_NODE_ID, new_content="Superseded: this node was replaced during testing", reason="Test suite supersession")`. 
+- PASS if: returns new node ID and acknowledges supersession +- FAIL if: error +- **Save the returned new node_id as `TEST_NODE_ID_V2`** + +**T3.7 — Search → Get consistency check** ⚠️ +Call `memory(action="search", query="[CS-TEST] Round-trip test node")`. +Then for EACH node ID returned, call `memory(action="get_node", node_id=)`. +- PASS if: every ID returned by search resolves successfully via get_node +- FAIL if: any ID returns 404 — **this is the exact bug observed in past sessions** + +--- + +## Suite 4: Memory — Events (5 tests) + +**T4.1 — Create event** +Call `memory(action="create_event", event_type="note", title="[CS-TEST] Event round-trip test", content="Test event created by MCP test suite. Contains structured data:\n- key1: value1\n- key2: value2")`. +- PASS if: returns event ID +- FAIL if: error +- **Save as `TEST_EVENT_ID`** + +**T4.2 — Get event (round-trip)** +Call `memory(action="get_event", event_id=TEST_EVENT_ID)`. +- PASS if: returns event with matching title and content including "key1: value1" +- FAIL if: 404 or content mismatch + +**T4.3 — List events (verify appears)** +Call `memory(action="list_events")`. +- PASS if: test event with "[CS-TEST]" in title appears in the list +- FAIL if: not found + +**T4.4 — Update event** +Call `memory(action="update_event", event_id=TEST_EVENT_ID, title="[CS-TEST] Event round-trip test (UPDATED)", content="Updated event content with additional data:\n- key3: value3")`. +- PASS if: update acknowledged +- FAIL if: error + +**T4.5 — Get event (verify update)** +Call `memory(action="get_event", event_id=TEST_EVENT_ID)`. +- PASS if: title contains "(UPDATED)" and content contains "key3: value3" +- FAIL if: shows old content + +--- + +## Suite 5: Memory — Docs (7 tests) ⚠️ KNOWN PAIN POINT + +> **Context:** The user has experienced issues with documents not syncing between MCP creation and the ContextStream UI. This suite stress-tests doc CRUD and content fidelity. 
+ +**T5.1 — Create doc** +Call `memory(action="create_doc", doc_type="general", title="[CS-TEST] Doc Sync Verification", content="# Test Document\n\nThis document tests ContextStream doc sync fidelity.\n\n## Section 1: Basic Content\nParagraph with **bold**, *italic*, and `code`.\n\n## Section 2: Code Block\n```csharp\npublic class TestClass {\n public void TestMethod() {\n Console.WriteLine(\"Hello from test\");\n }\n}\n```\n\n## Section 3: Table\n| Column A | Column B |\n|----------|----------|\n| val1 | val2 |\n\n## Section 4: List\n- Item 1\n- Item 2\n - Nested item\n- Item 3")`. +- PASS if: returns doc ID +- FAIL if: error +- **Save as `TEST_DOC_ID`** + +**T5.2 — Get doc by ID (content fidelity)** +Call `memory(action="get_doc", doc_id=TEST_DOC_ID)`. +- PASS if: ALL of these are true: + - Title matches "[CS-TEST] Doc Sync Verification" + - Content contains the code block with `public class TestClass` + - Content contains the table with "Column A" and "Column B" + - Content contains the nested list item +- FAIL if: any content is truncated, mangled, or missing — **this is the sync regression test** + +**T5.3 — Get doc by title query** +Call `memory(action="get_doc", doc_id="Doc Sync Verification")`. +- PASS if: returns the same doc as T5.2 (matching ID) +- FAIL if: not found or returns a different doc + +**T5.4 — Update doc (substantial change)** +Call `memory(action="update_doc", doc_id=TEST_DOC_ID, content="# Test Document (UPDATED)\n\nOriginal content replaced with updated version.\n\n## New Section\nThis section was added during the update test.\n\n## Preserved Formatting\n```python\ndef test():\n return 'updated'\n```\n\n| New Col A | New Col B | New Col C |\n|-----------|-----------|----------|\n| x | y | z |")`. +- PASS if: update acknowledged +- FAIL if: error + +**T5.5 — Get doc (verify update persisted)** +Call `memory(action="get_doc", doc_id=TEST_DOC_ID)`. 
+- PASS if: content contains "(UPDATED)" in title and "New Section" and the python code block +- FAIL if: still shows old content — **indicates sync/cache issue** + +**T5.6 — List docs (verify appears)** +Call `memory(action="list_docs")`. +- PASS if: test doc with "[CS-TEST]" title appears +- FAIL if: not found in listing + +**T5.7 — Large content doc** ⚠️ +Create a doc with >2000 characters of content: +Call `memory(action="create_doc", doc_type="general", title="[CS-TEST] Large Content Stress Test", content="{generate a ~2500 char markdown document with multiple sections, code blocks, tables, and lists}")`. +Then immediately `get_doc` by the returned ID. +- PASS if: returned content length matches what was sent (within 5% tolerance) +- FAIL if: content is truncated — **regression test for large doc sync** +- **Save as `TEST_LARGE_DOC_ID`** + +--- + +## Suite 6: Memory — Tasks (5 tests) + +**T6.1 — Create task** +Call `memory(action="create_task", title="[CS-TEST] Task round-trip test", priority="medium", description="Test task for MCP verification")`. +- PASS if: returns task ID +- FAIL if: error +- **Save as `TEST_TASK_ID`** + +**T6.2 — Get task (round-trip)** +Call `memory(action="get_task", task_id=TEST_TASK_ID)`. +- PASS if: returns task with matching title and priority "medium" +- FAIL if: 404 or mismatch + +**T6.3 — Update task status progression** +Call `memory(action="update_task", task_id=TEST_TASK_ID, task_status="in_progress")`. +Then call `memory(action="update_task", task_id=TEST_TASK_ID, task_status="completed")`. +- PASS if: both updates succeed, final status is "completed" +- FAIL if: either update fails + +**T6.4 — List tasks (verify appears)** +Call `memory(action="list_tasks")`. +- PASS if: test task appears in list +- FAIL if: not found + +**T6.5 — Create task with plan linkage** +Call `memory(action="create_task", title="[CS-TEST] Plan-linked task", priority="low", description="Task linked to test plan")`. 
+- PASS if: returns task ID (plan linkage optional — just verify task creation works standalone) +- FAIL if: error +- **Save as `TEST_PLAN_TASK_ID`** + +--- + +## Suite 7: Memory — Todos (5 tests) + +**T7.1 — Create todo** +Call `memory(action="create_todo", title="[CS-TEST] Todo round-trip test", todo_priority="medium")`. +- PASS if: returns todo ID +- FAIL if: error +- **Save as `TEST_TODO_ID`** + +**T7.2 — Get todo (round-trip)** +Call `memory(action="get_todo", todo_id=TEST_TODO_ID)`. +- PASS if: returns todo with matching title and priority +- FAIL if: 404 or mismatch + +**T7.3 — Update todo** +Call `memory(action="update_todo", todo_id=TEST_TODO_ID, todo_priority="high")`. +Then `get_todo` to verify. +- PASS if: priority updated to "high" +- FAIL if: still "medium" or error + +**T7.4 — Complete todo** +Call `memory(action="complete_todo", todo_id=TEST_TODO_ID)`. +Then `get_todo` to verify status. +- PASS if: status is "completed" +- FAIL if: still "pending" or error + +**T7.5 — List todos** +Call `memory(action="list_todos")`. +- PASS if: test todo appears in list +- FAIL if: not found + +--- + +## Suite 8: Memory — Diagrams (5 tests) + +**T8.1 — Create diagram** +Call `memory(action="create_diagram", diagram_type="flowchart", title="[CS-TEST] Diagram round-trip test", content="graph TD\n A[Start] --> B{Decision}\n B -->|Yes| C[Action 1]\n B -->|No| D[Action 2]\n C --> E[End]\n D --> E")`. +- PASS if: returns diagram ID +- FAIL if: error +- **Save as `TEST_DIAGRAM_ID`** + +**T8.2 — Get diagram (round-trip)** +Call `memory(action="get_diagram", diagram_id=TEST_DIAGRAM_ID)`. +- PASS if: returns diagram with mermaid content containing "graph TD" and "Decision" +- FAIL if: 404 or content missing + +**T8.3 — Update diagram** +Call `memory(action="update_diagram", diagram_id=TEST_DIAGRAM_ID, content="graph LR\n A[Updated Start] --> B[Updated End]")`. 
+- PASS if: update acknowledged +- FAIL if: error + +**T8.4 — Get diagram (verify update)** +Call `memory(action="get_diagram", diagram_id=TEST_DIAGRAM_ID)`. +- PASS if: content now contains "graph LR" and "Updated Start" +- FAIL if: still shows old content + +**T8.5 — List diagrams** +Call `memory(action="list_diagrams")`. +- PASS if: test diagram appears +- FAIL if: not found + +--- + +## Suite 9: Session Operations (8 tests) + +**T9.1 — Capture decision** +Call `session(action="capture", event_type="decision", title="[CS-TEST] Test decision capture", content="Decision: Use MCP-only verification for test suite. Rationale: Faster and fully autonomous.", importance="low")`. +- PASS if: capture acknowledged +- FAIL if: error + +**T9.2 — Capture lesson** +Call `session(action="capture_lesson", title="[CS-TEST] Test lesson", trigger="Running test suite", impact="Validates MCP server reliability", prevention="Regular test runs", severity="low", keywords=["testing", "cs-test"])`. +- PASS if: lesson captured +- FAIL if: error + +**T9.3 — Get lessons (verify)** +Call `session(action="get_lessons")`. +- PASS if: returns lessons (list may or may not include the just-created one depending on indexing) +- FAIL if: error + +**T9.4 — Remember** +Call `session(action="remember", content="[CS-TEST] The MCP test suite was last run at {current ISO time}")`. +- PASS if: acknowledged +- FAIL if: error + +**T9.5 — Recall** +Call `session(action="recall", query="[CS-TEST] MCP test suite last run")`. +- PASS if: returns results (may include the remembered fact) +- FAIL if: error + +**T9.6 — Capture plan** +Call `session(action="capture_plan", title="[CS-TEST] Test plan", steps=["Step 1: Initialize", "Step 2: Create artifacts", "Step 3: Verify round-trips", "Step 4: Report results"])`. +- PASS if: returns plan ID +- FAIL if: error +- **Save as `TEST_PLAN_ID`** + +**T9.7 — Get plan** +Call `session(action="get_plan", plan_id=TEST_PLAN_ID)`. 
+- PASS if: returns plan with 4 steps matching what was created +- FAIL if: 404 or steps missing + +**T9.8 — List plans** +Call `session(action="list_plans")`. +- PASS if: test plan with "[CS-TEST]" title appears in list +- FAIL if: not found + +--- + +## Suite 10: Project Operations (6 tests) + +**T10.1 — List projects** +Call `project(action="list")`. +- PASS if: returns at least one project +- FAIL if: empty or error + +**T10.2 — Get project** +Call `project(action="get")`. +- PASS if: returns project details for simhub-plugin +- FAIL if: error + +**T10.3 — Index status** +Call `project(action="index_status")`. +- PASS if: returns status (ready, indexing, or stale) +- FAIL if: error + +**T10.4 — Statistics** +Call `project(action="statistics")`. +- PASS if: returns statistics with file counts or similar metrics +- FAIL if: error + +**T10.5 — Files** +Call `project(action="files", page_size=5)`. +- PASS if: returns file list with paths +- FAIL if: error or empty + +**T10.6 — Recent changes** +Call `project(action="recent_changes", limit=3)`. +- PASS if: returns recent git commits +- FAIL if: error + +--- + +## Suite 11: Workspace Operations (3 tests) + +**T11.1 — List workspaces** +Call `workspace(action="list")`. +- PASS if: returns at least one workspace including "sim-steward" +- FAIL if: empty or error + +**T11.2 — Get workspace** +Call `workspace(action="get")`. +- PASS if: returns workspace details +- FAIL if: error + +**T11.3 — Index settings** +Call `workspace(action="index_settings")`. +- PASS if: returns settings (or permission error if not admin — note which) +- FAIL if: unexpected error + +--- + +## Suite 12: Graph Operations (4 tests) + +**T12.1 — Dependencies** +Call `graph(action="dependencies", target_id="SimStewardPlugin.cs", target_type="module")`. 
+- PASS if: returns dependency data (even if empty for this file)
+- FAIL if: unexpected error (not a "no data" response)
+
+**T12.2 — Impact analysis**
+Call `graph(action="impact", target_id="DispatchAction", target_type="function", change_type="modify_signature")`.
+- PASS if: returns impact analysis results
+- FAIL if: error
+
+**T12.3 — Usages**
+Call `graph(action="usages", target_id="IRSDKSharper")`.
+- PASS if: returns files/components that use IRSDKSharper
+- FAIL if: error
+
+**T12.4 — Circular dependencies**
+Call `graph(action="circular_dependencies", limit=5)`.
+- PASS if: returns results (empty list is OK — means no circular deps)
+- FAIL if: error
+
+---
+
+## Suite 13: Skills (4 tests) ⚠️ KNOWN PAIN POINT
+
+> **Context:** `skill(import, format=auto)` has been known to split multi-section files into many personal skills. This suite tests safe CRUD.
+
+**T13.1 — List skills**
+Call `skill(action="list")`.
+- PASS if: returns skill list including known skills (e.g., "simsteward-deploy", "contextstream")
+- FAIL if: empty or error
+
+**T13.2 — Get skill**
+Pick the first skill ID from T13.1. Call `skill(action="get", skill_id={first skill ID from T13.1})`.
+- PASS if: returns skill with instruction_body present
+- FAIL if: missing instruction_body or error
+
+**T13.3 — Create skill**
+Call `skill(action="create", name="cs-test-skill", title="[CS-TEST] Test Skill", description="Skill created by MCP test suite", instruction_body="# Test Skill\n\nThis skill does nothing. It exists to verify skill CRUD.\n\n## Steps\n1. Do nothing\n2. Report success", categories=["testing", "cs-test"], status="draft")`.
+- PASS if: returns skill ID
+- FAIL if: error
+- **Save as `TEST_SKILL_ID`**
+
+**T13.4 — Get created skill (round-trip)**
+Call `skill(action="get", skill_id=TEST_SKILL_ID)`.
+- PASS if: returns skill with title "[CS-TEST] Test Skill" and instruction_body containing "This skill does nothing"
+- FAIL if: 404, wrong content, or instruction_body missing
+
+---
+
+## Suite 14: Reminders (4 tests)
+
+**T14.1 — Create reminder**
+Call `reminder(action="create", title="[CS-TEST] Test reminder", content="Reminder created by MCP test suite", priority="low", remind_at="{ISO time 1 hour from now}")`.
+- PASS if: returns reminder ID
+- FAIL if: error
+- **Save as `TEST_REMINDER_ID`**
+
+**T14.2 — List reminders**
+Call `reminder(action="list")`.
+- PASS if: test reminder appears in list
+- FAIL if: not found
+
+**T14.3 — Active reminders**
+Call `reminder(action="active")`.
+- PASS if: returns list (test reminder may or may not appear depending on remind_at timing)
+- FAIL if: error
+
+**T14.4 — Complete reminder**
+Call `reminder(action="complete", reminder_id=TEST_REMINDER_ID)`.
+- PASS if: completion acknowledged
+- FAIL if: error
+
+---
+
+## Suite 15: Instruct / RAM (4 tests)
+
+> **Note:** These require a session_id. Use `"cs-test-session"` as the session_id.
+
+**T15.1 — Push entries**
+Call `instruct(action="push", session_id="cs-test-session", entries=[{"text": "[CS-TEST] Instruction 1: Always verify round-trips", "source": "test-suite"}, {"text": "[CS-TEST] Instruction 2: Check for 404s on get_node", "source": "test-suite", "critical": true}])`.
+- PASS if: push acknowledged
+- FAIL if: error
+
+**T15.2 — Get entries**
+Call `instruct(action="get", session_id="cs-test-session")`.
+- PASS if: returns the 2 pushed entries
+- FAIL if: empty or error
+
+**T15.3 — Stats**
+Call `instruct(action="stats", session_id="cs-test-session")`.
+- PASS if: returns cache statistics
+- FAIL if: error
+
+**T15.4 — Ack entries**
+Take the entry IDs from T15.2. Call `instruct(action="ack", session_id="cs-test-session", ids=[{entry ID 1}, {entry ID 2}])`.
+- PASS if: acknowledgment succeeded +- FAIL if: error + +--- + +## Suite 16: Integration Status (2 tests) + +**T16.1 — All integrations status** +Call `integration(provider="all", action="status")`. +- PASS if: returns status for available integrations (connected or not) +- FAIL if: unexpected error + +**T16.2 — GitHub integration search (if connected)** +If T16.1 shows GitHub connected, call `integration(provider="github", action="repos")`. +- PASS if: returns repo list +- FAIL if: error (SKIP if GitHub not connected) + +--- + +## Suite 17: Help & Utility (4 tests) + +**T17.1 — List tools** +Call `help(action="tools")`. +- PASS if: returns tool list with 10+ tools +- FAIL if: empty or error + +**T17.2 — Auth check** +Call `help(action="auth")`. +- PASS if: returns current user info +- FAIL if: error + +**T17.3 — Version** +Call `help(action="version")`. +- PASS if: returns version string +- FAIL if: error + +**T17.4 — Team status** +Call `help(action="team_status")`. +- PASS if: returns team/subscription info (or "no team" — both valid) +- FAIL if: unexpected error + +--- + +## Suite 18: Stress & Edge Cases (6 tests) ⚠️ + +**T18.1 — Unicode in titles/content** +Call `memory(action="create_node", node_type="fact", title="[CS-TEST] Unicode: 日本語テスト 🏎️ émojis & spëcial «chars»", content="Content with unicode: ñ, ü, ø, 中文, العربية, backticks: \`code\`, pipes: |col1|col2|")`. +- PASS if: returns ID, and `get_node` returns content with all unicode preserved +- FAIL if: content mangled, truncated, or error +- **Save as `TEST_UNICODE_NODE_ID`** + +**T18.2 — Rapid sequential creates** +Create 3 nodes in rapid succession (no delay between calls): +1. `memory(action="create_node", node_type="fact", title="[CS-TEST] Rapid 1")` +2. `memory(action="create_node", node_type="fact", title="[CS-TEST] Rapid 2")` +3. `memory(action="create_node", node_type="fact", title="[CS-TEST] Rapid 3")` +Then `list_nodes` and verify all 3 appear. 
+- PASS if: all 3 created and all 3 retrievable via `get_node`
+- FAIL if: any creation fails, any ID returns 404, or any missing from list
+
+**T18.3 — Invalid UUID handling**
+Call `memory(action="get_node", node_id="00000000-0000-0000-0000-000000000000")`.
+- PASS if: returns a clean 404 "not found" error
+- FAIL if: crashes, hangs, or returns unexpected data
+
+**T18.4 — Search immediately after create**
+Call `memory(action="create_node", node_type="fact", title="[CS-TEST] Immediate search target XYZ789", content="Unique content for immediate indexing test: QRS456")`.
+Then immediately call `memory(action="search", query="XYZ789 QRS456")`.
+- PASS if: the just-created node appears in search results
+- FAIL if: not found — **indicates indexing lag** (note the delay if any)
+
+**T18.5 — Empty content handling**
+Call `memory(action="create_node", node_type="fact", title="[CS-TEST] Empty content test", content="")`.
+- PASS if: creates successfully (empty content allowed) or returns clear validation error
+- FAIL if: crashes or ambiguous error
+
+**T18.6 — Memory search → get consistency** ⚠️ CRITICAL
+Call `memory(action="search", query="[CS-TEST]")`.
+For EVERY node ID returned in the search results, call `memory(action="get_node", node_id={that ID})`.
+- PASS if: 100% of search-returned IDs resolve via get_node
+- FAIL if: ANY ID returns 404 — **this is the primary regression test for the known node ghost bug**
+- Report: "{N} of {M} IDs resolved successfully"
+
+---
+
+## Final Report
+
+After completing all suites, produce this output:
+
+### Summary Table
+
+```
+| Suite | Test | Description                              | Result | Notes |
+|-------|------|------------------------------------------|--------|-------|
+| 1     | T1.1 | Init session                             | ?      |       |
+| 1     | T1.2 | Context (fast)                           | ?      |       |
+| ...   | ...  | ...                                      | ...    | ...   |
+```
+
+### Statistics
+- Total tests: {N}
+- Passed: {N}
+- Failed: {N}
+- Skipped: {N}
+
+### Known Pain Point Results
+Highlight these specifically:
+1. **Node 404 ghost bug (T3.7, T18.6):** PASS/FAIL — {details}
+2. **Doc sync fidelity (T5.2, T5.5, T5.7):** PASS/FAIL — {details}
+3. **Skill CRUD (T13.3, T13.4):** PASS/FAIL — {details}
+4. **Search→Get consistency (T18.6):** PASS/FAIL — {N}/{M} resolved
+5. **Immediate indexing (T18.4):** PASS/FAIL — {details}
+
+### Artifacts Left for UI Inspection
+List all `[CS-TEST]` artifacts with their IDs and types:
+```
+| Type     | Title                                    | ID   |
+|----------|------------------------------------------|------|
+| node     | [CS-TEST] Round-trip test node           | ...  |
+| doc      | [CS-TEST] Doc Sync Verification          | ...  |
+| ...      | ...                                      | ...  |
+```
+
+**To clean up:** In a future session, search for `[CS-TEST]` and delete all matching artifacts.