From 9892136a0ec4cb5cb4b306fe5de501bc487aa65a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=7BAI=7Df=20D=2E=20M=C3=BCller?= Date: Wed, 13 May 2026 22:12:04 +0200 Subject: [PATCH 1/2] feat: add socratic-code-theory-recovery Claude Code Skill (#473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Package the brownfield Socratic Code-Theory Recovery workflow as an installable Claude Code Skill. Structure: - SKILL.md — when-to-use, contract overview, two-phase workflow diagram - prompts/phase-1-question-tree.md — copy-paste prompt for Phase 1, with post-prompt sanity-check and team-routing instructions - prompts/phase-2-synthesize.md — Phase 2 prompt that consumes the answered tree and produces PRD + Cockburn use cases + arc42 + Nygard ADRs with full Q-ID traceability - references/arc42.md — 12 sections as Q3 decomposition heuristic - references/cockburn-use-cases.md — fields as Q2 sub-questions, persona vs system use cases - references/iso-25010.md — 8 quality characteristics as Q4 sub-questions, with the mechanism-vs-target split - references/nygard-adrs.md — ADR format as Q3.9 sub-tree, what makes a decision architecturally significant, Pugh matrix guidance - references/output-schema.md — strict format for QUESTION_TREE.adoc and OPEN_QUESTIONS.adoc, including Q-ID scheme, [ANSWERED]/[OPEN] block formats, and Phase 2 traceability rules - references/examples.md — worked [ANSWERED] and [OPEN] leaves for each major branch (Q1-Q5) from a hypothetical Order Management context Reference snippets are embedded (per agreed scope) rather than linked, so the skill is usable offline once installed. End-to-end testing on a real codebase is deferred to a follow-up issue. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- skill/socratic-code-theory-recovery/SKILL.md | 114 ++++++++++ .../prompts/phase-1-question-tree.md | 76 +++++++ .../prompts/phase-2-synthesize.md | 68 ++++++ .../references/arc42.md | 42 ++++ .../references/cockburn-use-cases.md | 57 +++++ .../references/examples.md | 196 ++++++++++++++++++ .../references/iso-25010.md | 51 +++++ .../references/nygard-adrs.md | 58 ++++++ .../references/output-schema.md | 153 ++++++++++++++ 9 files changed, 815 insertions(+) create mode 100644 skill/socratic-code-theory-recovery/SKILL.md create mode 100644 skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md create mode 100644 skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md create mode 100644 skill/socratic-code-theory-recovery/references/arc42.md create mode 100644 skill/socratic-code-theory-recovery/references/cockburn-use-cases.md create mode 100644 skill/socratic-code-theory-recovery/references/examples.md create mode 100644 skill/socratic-code-theory-recovery/references/iso-25010.md create mode 100644 skill/socratic-code-theory-recovery/references/nygard-adrs.md create mode 100644 skill/socratic-code-theory-recovery/references/output-schema.md diff --git a/skill/socratic-code-theory-recovery/SKILL.md b/skill/socratic-code-theory-recovery/SKILL.md new file mode 100644 index 0000000..25ec944 --- /dev/null +++ b/skill/socratic-code-theory-recovery/SKILL.md @@ -0,0 +1,114 @@ +--- +name: socratic-code-theory-recovery +description: Recover the "theory" (Naur 1985) of an existing codebase through recursive question refinement before writing documentation. Use on brownfield projects where the spec is missing — produces a Question Tree separating what is answerable from code (with evidence) from what must be asked of the team (routed by role). Phase 1 builds the tree; team answers the OPEN leaves; Phase 2 synthesizes PRD, Cockburn use cases, arc42 architecture, and Nygard ADRs from the answered tree. 
+metadata: + author: LLM-Coding + version: "0.1" + source: https://github.com/LLM-Coding/Semantic-Anchors +license: MIT +--- + +# Socratic Code-Theory Recovery + +Reverse-engineer a bounded context into documentation without hallucinating the parts the code cannot tell you. + +## When to use this skill + +Use this skill on a brownfield codebase when: + +- Documentation is missing, outdated, or untrusted. +- A change is about to be made and you need a spec before you can change safely. +- You want documentation that distinguishes code-derived facts from team-supplied context — auditable, not generated prose. +- You want to surface the *open questions* in the system, not just synthesize an answer the team has not seen. + +Do **not** use this skill when: + +- You are doing greenfield development — use the spec-driven workflow instead. +- The whole system needs to be documented at once — work one bounded context at a time. +- The code is not runnable — fix that first. + +## The contract + +This skill implements the *Socratic Code-Theory Recovery* contract from the Semantic Anchors project. The methodology rests on Peter Naur's 1985 paper *Programming as Theory Building*: a program's theory lives in the heads of its developers and cannot be fully captured in code alone. A documentation-recovery process that ignores this produces confident-looking prose that fills in the gaps with invention. + +The fix: model the gaps explicitly. Every question about the system is either `[ANSWERED]` from code (with file:line evidence) or `[OPEN]` (with a category and the role that must answer it). The OPEN leaves are the handoff to humans. 
+ +## Two-phase workflow + +``` + ┌────────────────────────────────┐ + Phase 1 │ CODE ──► Question Tree │ + │ ├─ [ANSWERED] leaves│ + │ └─ [OPEN] leaves │ + └────────────────┬───────────────┘ + ▼ + ┌────────────────────────────────┐ + Between │ OPEN_QUESTIONS.adoc │ + │ ──► team (routed by role) │ + │ ──► answers fill in OPENs │ + └────────────────┬───────────────┘ + ▼ + ┌────────────────────────────────┐ + Phase 2 │ Answered tree ──► Docs │ + │ PRD · Cockburn UCs · arc42 · │ + │ Nygard ADRs (every claim Q-ID) │ + └────────────────────────────────┘ +``` + +### Phase 1: Build the Question Tree + +Use [prompts/phase-1-question-tree.md](prompts/phase-1-question-tree.md). Adapt the bounded-context path and any domain-specific Q1 examples; do not change the leaf classification, Q-ID scheme, or output files. + +Outputs: + +- `QUESTION_TREE.adoc` — the full hierarchical reasoning trace +- `OPEN_QUESTIONS.adoc` — only the `[OPEN]` leaves, grouped by Ask role + +Decomposition heuristics — use these Semantic Anchors as guides, not as rigid templates: + +- **arc42** — 12 architecture sub-questions (Q3 branch). See [references/arc42.md](references/arc42.md). +- **Cockburn Use Cases** — specification structure (Q2 branch). See [references/cockburn-use-cases.md](references/cockburn-use-cases.md). +- **ISO/IEC 25010** — 8 quality characteristics (Q4 branch). See [references/iso-25010.md](references/iso-25010.md). +- **Nygard ADRs** — design-rationale capture (Q3.9 branch). See [references/nygard-adrs.md](references/nygard-adrs.md). + +Leaf classification rules and Q-ID scheme: [references/output-schema.md](references/output-schema.md). + +Worked examples — one `[ANSWERED]` and one `[OPEN]` leaf for each major branch: [references/examples.md](references/examples.md). + +### Between Phases: Team answers the OPEN leaves + +Route `OPEN_QUESTIONS.adoc` to the people whose role appears in each section: Product Owner, Architect, Developer, Domain Expert, Operations. 
In one controlled experiment with a 13,000-line Go codebase, 11 targeted OPEN questions were enough to close the gap to the original documentation. + +Team answers are written **directly into `OPEN_QUESTIONS.adoc`** under each question, marked clearly. Do not call Phase 2 until every OPEN leaf has either an answer or an explicit `(deferred)` marker. + +### Phase 2: Synthesize documentation + +Use [prompts/phase-2-synthesize.md](prompts/phase-2-synthesize.md). The Phase 2 LLM reads the answered tree and produces: + +- **PRD** from the Q1 branch (problem, users, goals, success criteria) +- **Specification** from the Q2 branch (Cockburn use cases at User Goal level, system use cases for each technical interface, supplementary specifications) +- **arc42** with all 12 chapters from the Q3 branch +- **Nygard ADRs** with Pugh Matrix from the Q3.9 branch + +Every claim references a Q-ID. Team-supplied information is marked `(team answer)`. This dual traceability — code evidence plus team input — is the difference from a simple reverse-engineering prompt that fills in gaps silently. + +## What the LLM can and cannot recover + +A controlled experiment (deleting documentation from a greenfield project and regenerating it from code) showed: + +**Derivable from code**: functional requirements, acceptance criteria, building-block views, glossary, security mechanisms, crosscutting concepts. + +**NOT derivable from code**: business context, design rationale (the ADR "why"), quality-goal *priorities*, stakeholder concerns, aspirational features, performance budgets, tutorials, review results. + +If your synthesized documentation contains a claim from the second list without a `(team answer)` marker, the LLM hallucinated it. Mark it `[OPEN]` and ask the team. + +## Spec drift and reconciliation + +After this skill produces documentation, the implementation LLM will add security hardening, validation rules, and edge cases that are not in the spec. 
This is structural, not a discipline problem. Re-run Phase 1 against the current code periodically — before a release, after a security review, before onboarding — and diff against the existing spec. The diff reveals NEW (in code, not in spec), CHANGED (diverged), and DEAD (in spec, not in code). + +## Further reading + +- Peter Naur, *Programming as Theory Building* (1985). https://pages.cs.wisc.edu/~remzi/Naur.pdf +- Brownfield Workflow (Semantic Anchors). https://llm-coding.github.io/Semantic-Anchors/brownfield +- Brownfield Experiment Report. https://llm-coding.github.io/Semantic-Anchors/brownfield-experiment-report +- Fair Comparison Report (three recovery approaches). https://llm-coding.github.io/Semantic-Anchors/brownfield-fair-comparison diff --git a/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md b/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md new file mode 100644 index 0000000..4229c25 --- /dev/null +++ b/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md @@ -0,0 +1,76 @@ +# Phase 1 Prompt: Build the Question Tree + +Copy the block below into a session that has read access to the bounded context. Replace `[bounded context path]` with the actual path. Adapt the Q1-Q5 examples if your domain has different starting concerns, but do not change the leaf classification, Q-ID scheme, or output files. + +``` +You are performing Socratic Code-Theory Recovery on a brownfield bounded +context located at [bounded context path]. Phase 1 of two. + +Goal: recover the program's theory (Naur, 1985) from source code through +recursive question refinement, before any documentation is written. + +Process: + +1. Start with five high-level questions about the bounded context: + Q1 What problem does this bounded context solve, and for whom? + Q2 What is the specification of this bounded context? + Q3 What is the architecture of this bounded context? + Q4 What quality goals drive the design? 
+ Q5 What risks and technical debt exist? + +2. Decompose each question recursively. Use these Semantic Anchors as + decomposition guides: + - arc42 — 12 sub-questions for architecture (Q3 branch) + - Cockburn Use Cases — Primary Actor, Trigger, Main Success Scenario, + Extensions, Postconditions for specification (Q2 branch) + - ISO/IEC 25010 — 8 quality characteristics for quality goals (Q4 branch) + - Nygard ADRs — Context, Decision, Status, Consequences for design + rationale (Q3.9 branch) + Stop decomposing when a question is precise enough to be answered with a + single piece of code evidence or a single fact from a stakeholder. + +3. Assign a hierarchical Q-ID to every node (Q1, Q1.2, Q1.2.3, ...) so that + later documentation can cite back to it. + +4. For each leaf, classify it: + + [ANSWERED] + - You found the answer in the code. + - Cite the evidence as : or ::. + - Be exact. No "see X for details." + + [OPEN] + - The answer is not derivable from code alone. + - Category: business-context | design-rationale | quality-goals | + stakeholder-context | future-direction + - Ask role: Product Owner | Architect | Developer | Domain Expert | + Operations + - State precisely what cannot be answered, and why. + +5. Output two files in AsciiDoc: + + QUESTION_TREE.adoc + - Full hierarchical tree with all nodes and Q-IDs + - Each leaf marked [ANSWERED] (with evidence) or [OPEN] (with Category + and Ask role) + - Includes all reasoning, not only the leaves + + OPEN_QUESTIONS.adoc + - Only the [OPEN] leaves, copied verbatim from QUESTION_TREE.adoc + - Grouped by Ask role (one section per role) + - Each question short enough to be answered in 1-3 sentences + +Do not write any other documentation in this phase. Phase 2 will synthesize +the answered tree into PRD, specification, arc42, and ADRs — only after the +team has filled in the [OPEN] leaves. +``` + +## What to do after the prompt completes + +1. 
**Sanity-check `QUESTION_TREE.adoc`.** Pick three `[ANSWERED]` leaves at random and verify the cited file:line actually contains the claim. If any cite is wrong, the LLM is hallucinating evidence — re-run with a smaller bounded context. + +2. **Route `OPEN_QUESTIONS.adoc` to the team.** One section per Ask role. Typically 10-15 questions for a small bounded context; if you see 50+, the bounded context is too large. + +3. **Team writes answers directly into `OPEN_QUESTIONS.adoc`** under each question. Mark deferrals explicitly as `(deferred)` so Phase 2 can decide whether to leave them as gaps in the documentation. + +4. Only after every leaf has an answer or an explicit deferral, run Phase 2. diff --git a/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md b/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md new file mode 100644 index 0000000..a875d16 --- /dev/null +++ b/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md @@ -0,0 +1,68 @@ +# Phase 2 Prompt: Synthesize Documentation + +Run this prompt only after every `[OPEN]` leaf in `OPEN_QUESTIONS.adoc` has either a team answer or an explicit `(deferred)` marker. + +``` +You are performing Phase 2 of Socratic Code-Theory Recovery. + +Inputs: +- QUESTION_TREE.adoc — the answered Question Tree from Phase 1. +- OPEN_QUESTIONS.adoc — same OPEN leaves, now with team answers (or + (deferred) markers) written under each question. + +Goal: synthesize documentation from the answered tree. Every claim must be +traceable to a Q-ID. Team-supplied facts must be marked (team answer). +Anything still marked (deferred) must remain an explicit gap in the output, +not be filled with invention. + +Produce four artifacts: + +1. docs/specs/prd-[context-name].adoc — Product Requirements Document + - Problem statement, target users, goals, success criteria, scope + boundaries, constraints, open questions + - Source: Q1 branch of QUESTION_TREE.adoc + - Anchor: PRD (Cagan / Pichler) + +2. 
docs/specs/use-cases-[context-name].adoc — Specification + - Persona Use Cases in Cockburn Fully Dressed format at User Goal level: + Primary Actor, Trigger, Stakeholders & Interests, Preconditions, + Main Success Scenario, Extensions, Postconditions, Business Rules. + - System Use Cases for each technical interface (API endpoint, CLI + command, event, file format): input + validation, processing, + output + status codes, error responses. + - Supplementary Specifications: Entity Model, State Machines, Interface + Contracts, Validation Rules. + - Gherkin acceptance criteria where applicable. + - Source: Q2 branch of QUESTION_TREE.adoc + - Anchor: Cockburn Use Cases + +3. docs/arc42/arc42-[context-name].adoc — Architecture + - All 12 arc42 chapters. Mark chapters with no content as + "No information from Phase 1" rather than fabricating content. + - Source: Q3 branch of QUESTION_TREE.adoc + - Anchor: arc42 (Starke / Hruschka) + +4. docs/specs/adrs/*.adoc — one ADR per significant design decision + - Nygard format: Title, Status, Context, Decision, Consequences. + - Include a Pugh Matrix listing the alternatives considered with a + 3-point scale (-1, 0, +1) against the quality goals from Q4. + - Source: Q3.9 branch of QUESTION_TREE.adoc + - Anchor: ADR according to Nygard + +Rules for traceability: +- Every paragraph references the Q-IDs that support it, in square brackets: + "The system uses Hexagonal Architecture [Q3.5]." +- Team-supplied facts get an inline marker: "Sessions expire after 24 hours + (team answer, Q3.4.2)." +- Deferred questions stay as explicit gaps: "Quality-goal priorities are + deferred (Q4.1.deferred) and must be resolved before the next release." +- Do not introduce facts that do not appear in QUESTION_TREE.adoc or + OPEN_QUESTIONS.adoc. If a section feels under-specified, leave it + under-specified — that is signal, not a defect. 
+``` + +## After Phase 2 + +- **Spec drift starts immediately.** Re-run Phase 1 against the current code before each release; diff the new Question Tree against the existing documentation to surface NEW (in code, not in spec), CHANGED (diverged), and DEAD (in spec, not in code) findings. + +- **Extend bounded contexts incrementally.** Don't reverse-engineer the whole system in one pass. Pick the next bounded context only when the first one's documentation is being actively used. diff --git a/skill/socratic-code-theory-recovery/references/arc42.md b/skill/socratic-code-theory-recovery/references/arc42.md new file mode 100644 index 0000000..b925422 --- /dev/null +++ b/skill/socratic-code-theory-recovery/references/arc42.md @@ -0,0 +1,42 @@ +# arc42 — Decomposition Guide for Q3 (Architecture) + +arc42 is a 12-section template for documenting software architecture (Gernot Starke, Peter Hruschka). In this skill, the 12 sections serve as decomposition heuristics for the Q3 branch of the Question Tree — each section becomes a sub-question. + +## The 12 sections as Q3 sub-questions + +| Q-ID | Section | Sub-question(s) | +|------|---------|-----------------| +| Q3.1 | Introduction and Goals | What does the system do at the highest level? Which 3-5 quality goals drive design? Who are the most important stakeholders? | +| Q3.2 | Architecture Constraints | Which technical, organizational, conventional constraints restrict design choices? | +| Q3.3 | Context and Scope | What are the system's external interfaces — neighbours, channels, protocols? Business context vs technical context? | +| Q3.4 | Solution Strategy | Which fundamental decisions and patterns shape the architecture? Technology choices, top-level decomposition, quality-goal approaches, organizational decisions? | +| Q3.5 | Building Block View | How is the system decomposed into containers, components, classes? Static structure at multiple levels of zoom. 
| +| Q3.6 | Runtime View | How do components interact for the most important scenarios — startup, user-visible flows, error handling? | +| Q3.7 | Deployment View | Which hardware/infrastructure runs the system? Deployment topology, environments, mapping building blocks to infrastructure. | +| Q3.8 | Crosscutting Concepts | Domain models, architecture/design patterns used, persistence, UI, communication, plausibility checks, exception/error handling, logging, security, internationalisation, configurability? | +| Q3.9 | Architecture Decisions | Why was each significant decision made? Each becomes a Nygard ADR — see [nygard-adrs.md](nygard-adrs.md). | +| Q3.10 | Quality Requirements | Quality tree, quality scenarios (when/where/who/measurement). Connects to Q4 (ISO 25010). | +| Q3.11 | Risks and Technical Debt | Known technical risks, debt items, and their mitigation status. Overlaps with Q5. | +| Q3.12 | Glossary | Domain terminology — terms the team uses with project-specific meaning. | + +## Decomposition hints + +- **Q3.1 Quality Goals** is *almost always* `[OPEN]` — priorities live in stakeholder heads, not code. Don't fake a ranking from package structure. +- **Q3.4 Solution Strategy** and **Q3.9 Architecture Decisions** are the *why* of the system. Code shows *what* was decided; the *why* is `[OPEN]` unless ADRs or commit messages explain it. +- **Q3.5 Building Block View** is the most code-derivable section. Walk packages/modules and trace dependencies. +- **Q3.6 Runtime View** is partially derivable — entry points, request flows. Error scenarios are often `[OPEN]` because the team's *intent* differs from what happens to compile. +- **Q3.11 Risks/Tech Debt** is `[OPEN]` unless TODO/FIXME comments are systematically maintained. Recent bug fixes and reverts often hint at debt the team already knows about. 
+ +## When to stop decomposing + +A Q3 sub-question is fine-grained enough to be a leaf when: + +- It can be answered with a single file:line reference, or +- It cannot be answered at all from code (mark `[OPEN]` with category and role). + +Avoid making sub-questions like "How does the system handle errors?" — too broad. Prefer "What happens when `OrderService.create()` is called with a duplicate idempotency key?" — answerable. + +## Reference + +- Project: https://arc42.org/ +- Anchor in the catalog: https://llm-coding.github.io/Semantic-Anchors/anchor/arc42 diff --git a/skill/socratic-code-theory-recovery/references/cockburn-use-cases.md b/skill/socratic-code-theory-recovery/references/cockburn-use-cases.md new file mode 100644 index 0000000..7d4d475 --- /dev/null +++ b/skill/socratic-code-theory-recovery/references/cockburn-use-cases.md @@ -0,0 +1,57 @@ +# Cockburn Use Cases — Decomposition Guide for Q2 (Specification) + +Alistair Cockburn's *Fully Dressed* use-case format gives the Q2 branch a precise structure. Each user-goal-level use case becomes a sub-tree of Q2; the format's mandatory fields are the leaves. + +## The fields as Q2 sub-questions + +For each use case the LLM identifies in the code (from controller methods, CLI commands, API endpoints): + +| Field | Sub-question | Code source | +|-------|--------------|-------------| +| **Primary Actor** | Who triggers this use case? | Authentication, role guards, API consumers | +| **Stakeholders & Interests** | Who else cares about the outcome, and what do they want? | Often `[OPEN]` — code doesn't show interests | +| **Preconditions** | What must be true before the use case can start? | Input validation, guards, state checks | +| **Trigger** | What event starts the use case? | Endpoint match, CLI invocation, event handler | +| **Main Success Scenario** | What is the numbered sequence of steps on the happy path? 
| The function body, controller flow | +| **Extensions** | What alternative or exceptional flows exist, branching from which step? | Error branches, edge-case `if` blocks, catch clauses | +| **Postconditions** | What must be true when the use case completes successfully? | Persisted state, emitted events, response shape | +| **Business Rules** | What domain invariants does the use case enforce? | Often `[OPEN]` — the rule is in the code, the *reason* for the rule is in the team's head | + +## Goal levels + +Cockburn distinguishes three goal levels: + +- **Summary / Cloud** — multiple user goals in a larger flow ("complete a purchase") +- **User Goal / Sea Level** — one sitting, one user, one outcome ("place an order") +- **Subfunction / Fish** — smaller actions reused by user-goal use cases ("validate address") + +The Q2 branch should contain *one sub-tree per User-Goal-level use case*. Subfunctions appear inline in the Extensions or in Supplementary Specifications. Summary-level flows are usually `[OPEN]` because they describe business processes, not technical flows. + +## System use cases vs persona use cases + +Persona use cases describe user-visible behaviour. System use cases describe technical interfaces (API endpoint contracts, CLI argument grammars, event payloads, file formats). Both belong in Q2 but in separate sub-branches: + +- `Q2.PUC.*` — persona use cases (user goal level) +- `Q2.SUC.*` — system use cases (one per technical interface) + +System use cases are *more* code-derivable than persona use cases because the interface contract is the code. Persona use cases require domain knowledge that is often `[OPEN]`. 
+ +## What is typically [OPEN] + +- *Why* a business rule exists (the rule itself is code-derivable) +- Stakeholder interests beyond the obvious primary actor +- Preconditions that depend on prior workflow steps not modelled in the bounded context +- Postconditions that involve external systems +- Quality requirements per use case (latency, throughput) — Q4 territory + +## When to stop decomposing + +A Q2 leaf is fine-grained enough when it asks about one field of one use case: + +- "What is the Main Success Scenario of `Q2.PUC.PlaceOrder`?" → leaf +- "What does the system do when an order is placed?" → still needs decomposition + +## Reference + +- Cockburn, *Writing Effective Use Cases* (Addison-Wesley, 2001). +- Anchor in the catalog: https://llm-coding.github.io/Semantic-Anchors/anchor/cockburn-use-cases diff --git a/skill/socratic-code-theory-recovery/references/examples.md b/skill/socratic-code-theory-recovery/references/examples.md new file mode 100644 index 0000000..5590d6c --- /dev/null +++ b/skill/socratic-code-theory-recovery/references/examples.md @@ -0,0 +1,196 @@ +# Worked Examples — [ANSWERED] and [OPEN] Leaves + +Concrete examples for each major branch. Use these as templates when phrasing your own leaves. All examples are from a hypothetical *Order Management* bounded context in a small e-commerce backend. + +## Q1 — Problem and Users + +### Q1.1: Who is the primary user of this bounded context? + +``` +[ANSWERED] +Evidence: src/auth/Role.java:8 (enum entries), src/api/OrderController.java:18 (@PreAuthorize("hasRole('MANAGER')")) +Users with role MANAGER (Sales Managers) place orders on behalf of +end customers. Customers themselves never interact with this bounded +context directly. +``` + +### Q1.3: What problem does placing-orders-on-behalf-of-customers solve? + +``` +[OPEN] +Category: business-context +Ask role: Product Owner, Domain Expert +The code shows the workflow but not the motivation. 
Is this a phone-sales +channel, a B2B account-management channel, or migration from a legacy +system that did the same? Affects how Q2 use cases should be structured. +``` + +## Q2 — Specification + +### Q2.PUC.PlaceOrder.Trigger + +``` +[ANSWERED] +Evidence: src/api/OrderController.java:18-25 +A Sales Manager triggers the use case by submitting POST /orders with +JSON body { customerId, items[], shippingAddress }. Authentication is +session-based via the existing Auth filter. +``` + +### Q2.PUC.PlaceOrder.Postconditions + +``` +[OPEN] +Category: business-context +Ask role: Product Owner, Domain Expert +Code persists status=PENDING and publishes OrderCreated, but no transition +from PENDING is visible in this bounded context (src/service/OrderService.java +line 92 is the last touch). Is PENDING a final state for this bounded +context — i.e. fulfilment lives in a different bounded context — or is the +transition missing? The answer changes whether this use case's Postconditions +should mention "order is queued for fulfilment" or "order is created and +visible to the manager". +``` + +### Q2.SUC.CreateOrderEndpoint.ErrorResponses + +``` +[ANSWERED] +Evidence: src/api/OrderController.java:30-58, src/api/ApiExceptionHandler.java:42-71 +The endpoint returns: +- 201 Created with order id on success +- 400 Bad Request with field-level errors when validation fails (handled + in OrderController, lines 30-48) +- 404 Not Found with message "customer not found" when customerId resolves + to no Customer (lines 50-55) +- 409 Conflict when inventory reservation fails (catches + InventoryUnavailableException, lines 56-58) +- 500 Internal Server Error from the global handler in ApiExceptionHandler + for unexpected failures +``` + +## Q3 — Architecture + +### Q3.5: How is the bounded context decomposed into modules? 
+ +``` +[ANSWERED] +Evidence: src/api/, src/service/, src/persistence/, src/integration/, src/domain/ +Five-package decomposition following a hexagonal-architecture-like layout: +- api/ — HTTP controllers, request/response DTOs +- service/ — application services (OrderService, ValidationService) +- domain/ — entities (Order, OrderLine, Customer reference) +- persistence/ — JPA repositories and entity adapters +- integration/ — adapters to Inventory, Pricing, Notification services +Dependencies point inward: api → service → domain. persistence and +integration are adapters depended on by service via ports defined in domain. +``` + +### Q3.1: Which 3-5 quality goals drive the design? + +``` +[OPEN] +Category: quality-goals +Ask role: Architect, Product Owner +Code shows mechanisms for several characteristics (idempotency keys, +optimistic locking, audit logging) but does not reveal the priority +ranking. A clear top-3 from Architect + Product Owner is required to +proceed with the Q4 sub-tree and to fill the Pugh matrices in Q3.9 ADRs. +``` + +## Q3.9 — Architecture Decisions (ADRs) + +### Q3.9.HexagonalArchitecture.Decision + +``` +[ANSWERED] +Evidence: src/domain/OrderRepositoryPort.java, src/persistence/OrderRepositoryAdapter.java, src/integration/InventoryClientPort.java +The system uses Hexagonal Architecture. Domain defines ports (interfaces); +adapters in persistence/ and integration/ implement them; service/ depends +on the ports, not the adapters. +``` + +### Q3.9.HexagonalArchitecture.Context + +``` +[OPEN] +Category: design-rationale +Ask role: Architect +The code shows the choice was made, but not why. Was the prior layered +architecture causing test pain? Was the team anticipating multiple +persistence options? Was this driven by an architecture standard outside +this repo? Affects whether the ADR should recommend continuing the +pattern in adjacent bounded contexts. 
+``` + +### Q3.9.HexagonalArchitecture.PughMatrix + +``` +[OPEN] +Category: design-rationale +Ask role: Architect +Alternatives the LLM can list from common practice: Layered Architecture, +Clean Architecture, modular monolith without hexagonal ports. Need the +architect to confirm which were actually considered, score them against +the Q4 priorities once those are answered, and explain the choice. +``` + +## Q4 — Quality Goals + +### Q4.6: How is Security implemented? + +``` +[ANSWERED] +Evidence: src/auth/AuthFilter.java, src/auth/Role.java, src/api/OrderController.java:18 (@PreAuthorize) +- AuthN: session-based, validated in AuthFilter +- AuthZ: role-based via Spring @PreAuthorize annotations on controllers +- Transport: HTTPS termination is external (no TLS code in this repo) +- Audit: no audit logging in this bounded context +``` + +### Q4.6.target: What is the Security target for this bounded context? + +``` +[OPEN] +Category: quality-goals +Ask role: Architect, Operations +Mechanisms are visible but the target is not. Specifically: +- Is audit logging required, and at what granularity (every state change? + only failed authZ?) +- Are there compliance frameworks in scope (PCI, GDPR, SOC2)? +- What is the threat model — internal abuse, external attackers, both? +Answers affect whether Q3.11 (Risks) flags absent audit logging as debt. +``` + +## Q5 — Risks and Technical Debt + +### Q5.2: What test debt exists? + +``` +[ANSWERED] +Evidence: src/service/OrderService.java::create (66 lines, cyclomatic complexity 14), test/service/OrderServiceTest.java (3 happy-path tests, no edge-case coverage) +OrderService.create has 14 branches; only the happy path plus two +validation-failure paths have tests. Inventory-failure, persistence-failure, +and notification-failure branches are untested. Adding tests is straightforward +once the team confirms intended behaviour for each branch (see Q2.SUC). +``` + +### Q5.4: What technical debt is the team already tracking? 
+ +``` +[OPEN] +Category: future-direction +Ask role: Developer, Architect +No TODO/FIXME/HACK comments in this bounded context, no ADRs marking +decisions as "Superseded", no `.deprecated` package. Either the team has +no tracked debt (unlikely) or the tracking lives elsewhere (issue tracker, +team wiki). Need a pointer to where debt is recorded. +``` + +## Patterns to copy + +- **State the source of the answer explicitly.** "Evidence: file:line" is short, scannable, and verifiable. +- **One claim per leaf.** If a leaf says "and also...", split it. +- **Make `[OPEN]` actionable.** A team member should be able to answer the question without reading the code first. State enough context that the question stands alone. +- **Don't over-decompose `[ANSWERED]` leaves.** Once you can cite file:line and write a 1-3 sentence answer, stop. +- **Don't under-decompose `[OPEN]` leaves.** If the answer would require multiple paragraphs from the team, the question is too broad; split it. diff --git a/skill/socratic-code-theory-recovery/references/iso-25010.md b/skill/socratic-code-theory-recovery/references/iso-25010.md new file mode 100644 index 0000000..904d67a --- /dev/null +++ b/skill/socratic-code-theory-recovery/references/iso-25010.md @@ -0,0 +1,51 @@ +# ISO/IEC 25010 — Decomposition Guide for Q4 (Quality Goals) + +ISO/IEC 25010 (SQuaRE Product Quality Model) defines 8 quality characteristics with sub-characteristics. In this skill they are the decomposition heuristic for the Q4 branch — each characteristic becomes a sub-question. 
+ +## The 8 characteristics as Q4 sub-questions + +| Q-ID | Characteristic | Sub-characteristics | What to look for in code | +|------|---------------|---------------------|--------------------------| +| Q4.1 | **Functional Suitability** | Completeness, Correctness, Appropriateness | Tests covering all use cases; correctness assertions; whether the implementation matches the documented scope | +| Q4.2 | **Performance Efficiency** | Time behaviour, Resource utilization, Capacity | Caching, indexes, async patterns, batch sizes, rate limits, connection pools | +| Q4.3 | **Compatibility** | Co-existence, Interoperability | Adapter layers, schema versioning, content-type negotiation, integration tests | +| Q4.4 | **Usability** | Appropriateness recognisability, Learnability, Operability, User error protection, UI aesthetics, Accessibility | Mostly `[OPEN]` for backend systems; UI repos: a11y attributes, error messages, help text | +| Q4.5 | **Reliability** | Maturity, Availability, Fault tolerance, Recoverability | Retry logic, circuit breakers, health checks, transaction boundaries, idempotency keys | +| Q4.6 | **Security** | Confidentiality, Integrity, Non-repudiation, Authenticity, Accountability | AuthN/AuthZ, encryption at rest/in transit, audit logging, input sanitisation, dependency scanning | +| Q4.7 | **Maintainability** | Modularity, Reusability, Analysability, Modifiability, Testability | Module boundaries, dependency direction, test coverage, code metrics if present | +| Q4.8 | **Portability** | Adaptability, Installability, Replaceability | Configuration externalisation, environment abstraction, infrastructure-as-code, dependency injection of platform services | + +## Quality in Use (the second model) + +The full 25010 standard also defines a Quality-in-Use model: Effectiveness, Efficiency, Satisfaction, Freedom from Risk, Context Coverage. These are almost always `[OPEN]` — they describe how users *experience* the system, which is not derivable from code. 
Include them as Q4.QiU.* sub-questions to surface the gap explicitly. + +## What is typically [ANSWERED] vs [OPEN] + +| Characteristic | Typically derivable | Typically open | +|----------------|--------------------|--------------:| +| Functional Suitability | Test coverage, assertions | *Priority* of correctness vs. speed | +| Performance Efficiency | Mechanism (cache, async) | Target thresholds, budgets | +| Compatibility | Adapter presence | Which versions are supported | +| Usability | a11y attrs in UI repos | UX research findings, persona fit | +| Reliability | Mechanism (retry, circuit) | Target SLOs, incident history | +| Security | Mechanism (encryption, auth) | Threat model, compliance scope | +| Maintainability | Module structure, test setup | Team velocity, change-failure rate | +| Portability | Config externalisation | Target environments, vendor constraints | + +**Rule of thumb**: the *mechanism* is in the code; the *target* and *priority* are in stakeholders' heads. Most Q4 leaves split into `[ANSWERED]` mechanism + `[OPEN]` target. + +## Quality goal *priority* is the highest-value open question + +The single most useful question for the team is "which 3-5 quality characteristics are top priority, and what is the rank order?" This shapes every architecture trade-off and is the input to arc42 Q3.1 and to ADR Pugh matrices. Always include it as `[OPEN]` Q4.0 with Ask role *Architect + Product Owner*. + +## When to stop decomposing + +A Q4 leaf is fine-grained enough when it asks about one sub-characteristic with measurable scope: + +- "What latency target does the order-creation endpoint meet?" → leaf +- "Is the system fast?" → still needs decomposition + +## Reference + +- ISO/IEC 25010:2011 — Systems and software engineering — Systems and software Quality Requirements and Evaluation (SQuaRE). 
+- Anchor in the catalog: https://llm-coding.github.io/Semantic-Anchors/anchor/iso-25010 diff --git a/skill/socratic-code-theory-recovery/references/nygard-adrs.md b/skill/socratic-code-theory-recovery/references/nygard-adrs.md new file mode 100644 index 0000000..ae05f50 --- /dev/null +++ b/skill/socratic-code-theory-recovery/references/nygard-adrs.md @@ -0,0 +1,58 @@ +# Nygard ADRs — Decomposition Guide for Q3.9 (Design Rationale) + +Michael Nygard's Architecture Decision Records (ADRs) capture *why* a design choice was made. In this skill ADRs form the Q3.9 branch — one sub-tree per architecturally significant decision. + +## The Nygard format as Q3.9 sub-questions + +| Field | Sub-question | Code derivability | +|-------|--------------|-------------------| +| **Title** | What is the decision in one short noun phrase? | `[ANSWERED]` — what was chosen is in the code | +| **Status** | Proposed / Accepted / Deprecated / Superseded by? | `[ANSWERED]` if the code reflects an active choice | +| **Context** | What forces led to the decision? | Usually `[OPEN]` — context is in stakeholders' heads, ticket history, or off-list discussions | +| **Decision** | What is the chosen approach? | `[ANSWERED]` — visible in code structure | +| **Consequences** | What follows from the decision — positive and negative? | Partially `[ANSWERED]` (visible coupling, complexity), partially `[OPEN]` (long-term implications) | + +## How to identify "architecturally significant" decisions + +A decision belongs in Q3.9 when at least one of these holds: + +- It is structurally hard to reverse (would require rewriting more than one module). +- It directly trades off two or more quality goals from Q4. +- It commits the project to an external dependency, vendor, or paradigm. + +If a decision is *easy to reverse* (a function-level pattern, a variable name), it does not belong in an ADR. 
+ +Typical candidates the LLM can spot from code: + +- Language and framework choices +- Persistence approach (SQL vs document, ORM choice, schema migration tooling) +- Communication style (sync REST vs async events vs RPC) +- Authentication and authorisation approach +- Build and packaging strategy (mono vs poly repo, deployment unit shape) +- Top-level decomposition pattern (layered, hexagonal, clean, modular monolith, microservices) + +## Pugh Matrix for alternatives + +For each ADR, include a Pugh Matrix listing the alternatives considered. Use a 3-point scale (-1, 0, +1) and score each alternative against the quality goals from Q4. The chosen alternative is the column with the highest sum; alternatives with similar scores should produce an explicit explanation in the Decision field. + +When code does not show what alternatives were considered (almost always), mark the alternative rows `[OPEN]` with Ask role *Architect*. Even partial answers are valuable — "we considered X but rejected it because Y" is the single most useful piece of an ADR. + +## What is typically [OPEN] + +The Context and the alternatives are almost always `[OPEN]`: + +- *Why* was this approach chosen, not the alternative? +- What constraints (legal, organisational, timeline) shaped the decision? +- Who made the decision, and is it still binding? +- Is the decision being revisited, deprecated, or superseded? + +Status sometimes becomes `[OPEN]` for older systems where the decision was never reaffirmed but the code still reflects it. + +## When to stop decomposing + +Each ADR is a Q3.9 sub-tree with five leaves (one per Nygard field). Don't decompose further; if a field is too broad for one leaf, the decision itself is too broad and should be split into multiple ADRs. + +## Reference + +- Michael Nygard, *Documenting Architecture Decisions* (2011). 
https://www.cognitect.com/blog/2011/11/15/documenting-architecture-decisions +- Anchor in the catalog: https://llm-coding.github.io/Semantic-Anchors/anchor/adr-according-to-nygard diff --git a/skill/socratic-code-theory-recovery/references/output-schema.md b/skill/socratic-code-theory-recovery/references/output-schema.md new file mode 100644 index 0000000..5b2e600 --- /dev/null +++ b/skill/socratic-code-theory-recovery/references/output-schema.md @@ -0,0 +1,153 @@ +# Output Schema — QUESTION_TREE.adoc and OPEN_QUESTIONS.adoc + +Two AsciiDoc files. The schema is rigid enough to be machine-checkable but written in plain prose so a team member can read it. + +## QUESTION_TREE.adoc + +Hierarchical tree, top-down. Each node has a Q-ID, the question, and (for leaves) either an `[ANSWERED]` or `[OPEN]` marker. + +```asciidoc += Question Tree — [Context Name] +:doctype: book + +== Q1: What problem does this bounded context solve, and for whom? + +=== Q1.1: Who is the primary user? +[ANSWERED] +Evidence: src/auth/User.java:42 (role enum), src/api/OrderController.java:18 (@PreAuthorize) +Sales-team users with role MANAGER place orders on behalf of customers. + +=== Q1.2: What outcome does the user want? +[OPEN] +Category: business-context +Ask role: Product Owner +The code shows order creation succeeds when validation passes, but does not +reveal what success means for the user (revenue? margin? cycle time?). Need +explicit goal statement. + +== Q2: What is the specification of this bounded context? + +=== Q2.PUC.PlaceOrder: Persona use case — Place an order +==== Q2.PUC.PlaceOrder.Actor +[ANSWERED] +Evidence: src/api/OrderController.java:18-25 +Primary Actor: Sales Manager (role MANAGER) + +==== Q2.PUC.PlaceOrder.Trigger +[ANSWERED] +Evidence: src/api/OrderController.java:18 (POST /orders) +Sales Manager submits POST /orders with order payload. + +==== Q2.PUC.PlaceOrder.MainSuccess +[ANSWERED] +Evidence: src/service/OrderService.java::create (lines 45-92) +1. 
System validates payload (Q2.PUC.PlaceOrder.Validation).
+2. System reserves inventory via InventoryClient.
+3. System persists Order with status PENDING.
+4. System publishes OrderCreated event.
+5. System returns 201 Created with order id.
+
+==== Q2.PUC.PlaceOrder.Postconditions
+[OPEN]
+Category: business-context
+Ask role: Product Owner, Domain Expert
+Code persists status=PENDING but never transitions it. Is PENDING a final
+state for this bounded context, or is downstream fulfilment expected to
+move it forward? Affects what counts as success.
+
+(...continues for Q3, Q4, Q5...)
+```
+
+### Q-ID scheme
+
+- `Q1`, `Q2`, ... — the five top-level questions
+- `Q1.1`, `Q1.2`, ... — direct decompositions
+- `Q3.5.2` — arbitrary depth
+- Within named sub-trees, use a stable label between dots so cites are stable across reruns:
+  - `Q2.PUC.PlaceOrder.Trigger` — persona use case PlaceOrder, field Trigger
+  - `Q2.SUC.CreateOrderEndpoint.ErrorResponses` — system use case for the create endpoint, ErrorResponses field
+  - `Q3.9.HexagonalArchitecture.Context` — ADR HexagonalArchitecture, Context field
+
+### `[ANSWERED]` block format
+
+```
+[ANSWERED]
+Evidence: <file>:<line>[, <file>:<line> ...]
+<factual answer, grounded in the cited evidence>
+```
+
+- **Evidence is mandatory.** No exception. A claim without evidence is `[OPEN]`, not `[ANSWERED]`.
+- File paths are relative to the bounded context root.
+- Use `file:line` for a specific line, `file::function` for a function regardless of line drift, `file` for a whole-file claim.
+- Keep the prose factual. The answer is the *what*; the *why* belongs in a separate `[OPEN]` leaf if it isn't in the code.
+
+### `[OPEN]` block format
+
+```
+[OPEN]
+Category: <category>
+Ask role: <role>[, <role> ...]
+<why the code cannot answer this question>
+```
+
+- **Category** classifies the type of knowledge needed. Used to route questions and to detect bias in the gap distribution.
+- **Ask role** is the role-class of person who can answer, not a named individual. Multiple roles are fine — list them in order of best to ask.
+- The body of the leaf must explain *why* the code can't answer it. If the body says "the code doesn't say", widen it: "the code persists status=PENDING but never transitions it" is a real reason; "we don't know" is not. + +## OPEN_QUESTIONS.adoc + +A flat, role-grouped projection of every `[OPEN]` leaf. This is the handoff document — the team sees only their section. + +```asciidoc += Open Questions — [Context Name] +:doctype: book + +== For Product Owner + +=== Q1.2: What outcome does the user want? +Category: business-context + +The code shows order creation succeeds when validation passes, but does not +reveal what success means for the user (revenue? margin? cycle time?). + +*Your answer:* +_(write here)_ + +=== Q2.PUC.PlaceOrder.Postconditions +Category: business-context + +Code persists status=PENDING but never transitions it. Is PENDING a final +state for this bounded context, or is downstream fulfilment expected to +move it forward? + +*Your answer:* +_(write here)_ + +== For Architect + +(...) + +== For Domain Expert + +(...) +``` + +### Rules + +- One section per Ask role. +- A leaf with multiple Ask roles is duplicated under each role's section — make it explicit to whichever role reads first. +- The `*Your answer:*` block is mandatory. Team members write directly into the file. Phase 2 reads this file together with `QUESTION_TREE.adoc`. +- A deferred question gets `(deferred — )` instead of an answer. Phase 2 treats deferred questions as explicit gaps, not as filled-in answers. + +## Phase 2 traceability + +After Phase 2, every paragraph in the synthesized documentation cites at least one Q-ID: + +``` +The system uses Hexagonal Architecture [Q3.9.HexagonalArchitecture]. Sessions +expire after 24 hours (team answer, Q3.8.Security.SessionLifetime). +Quality-goal priorities are deferred (Q4.0.deferred) and must be resolved +before the next release. +``` + +This is the auditable trace from documentation back to either code evidence or a team answer. 
Anything without a Q-ID is invention. From c57e929389c371e1a058ce0241ada41e2302b291 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=7BAI=7Df=20D=2E=20M=C3=BCller?= Date: Wed, 13 May 2026 22:38:20 +0200 Subject: [PATCH 2/2] docs: integrate Socratic Recovery Skill into the website (#473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Companion to the skill package itself: - New page docs/socratic-recovery-skill.adoc (EN+DE) with per-tool installation instructions for Claude Code, Codex, Cursor, GitHub Copilot, Gemini CLI, and Amazon Kiro. Same per-tool pattern as agentskill.adoc. - Wire the new page through scripts/render-docs.js, scripts/prerender-routes.js, website/src/main.js (renderSocraticRecoverySkillPage), and the route title map in website/src/utils/router.js. Reachable at /socratic-recovery-skill in both languages. - Add a [TIP] pointer from docs/brownfield-workflow.adoc (EN+DE) Phase 0.5 to the new skill page so users who reach the workflow find the install path immediately. - Add a "See also" entry to docs/agentskill.adoc (EN+DE) so the existing AgentSkill page also surfaces the new skill. - Add a [NOTE] callout at the top of docs/spec-driven-workflow.adoc (EN+DE) pointing brownfield users to the Brownfield Workflow and the skill — previously brownfield was only discoverable at the very bottom of the greenfield page. Build now pre-renders 14 routes (was 13). 
Co-Authored-By: Claude Opus 4.7 (1M context) --- docs/agentskill.adoc | 4 + docs/agentskill.de.adoc | 4 + docs/brownfield-workflow.adoc | 5 + docs/brownfield-workflow.de.adoc | 5 + docs/socratic-recovery-skill.adoc | 206 +++++++++++++++++++++++++++ docs/socratic-recovery-skill.de.adoc | 206 +++++++++++++++++++++++++++ docs/spec-driven-workflow.adoc | 5 + docs/spec-driven-workflow.de.adoc | 5 + scripts/prerender-routes.js | 7 + scripts/render-docs.js | 9 ++ website/src/main.js | 10 ++ website/src/utils/router.js | 1 + 12 files changed, 467 insertions(+) create mode 100644 docs/socratic-recovery-skill.adoc create mode 100644 docs/socratic-recovery-skill.de.adoc diff --git a/docs/agentskill.adoc b/docs/agentskill.adoc index 95ced31..44c07b9 100644 --- a/docs/agentskill.adoc +++ b/docs/agentskill.adoc @@ -133,3 +133,7 @@ The full catalog with descriptions, proponents, and core concepts is available a When you add a new anchor to the catalog, please also update the AgentSkill catalog at `skill/semantic-anchor-translator/references/catalog.md` so AI agents can discover it. See link:#/contributing[Contributing] for the full contribution workflow. + +== See also + +* link:#/socratic-recovery-skill[Socratic Code-Theory Recovery Skill] -- packages the brownfield documentation-recovery workflow as an installable skill that classifies every claim as code-derivable or open-to-the-team diff --git a/docs/agentskill.de.adoc b/docs/agentskill.de.adoc index 6d07e6a..e5e3977 100644 --- a/docs/agentskill.de.adoc +++ b/docs/agentskill.de.adoc @@ -133,3 +133,7 @@ Der vollständige Katalog mit Beschreibungen, Vertretern und Kernkonzepten ist u Wenn du einen neuen Anker zum Katalog hinzufügst, aktualisiere bitte auch den AgentSkill-Katalog unter `skill/semantic-anchor-translator/references/catalog.md`, damit KI-Agenten ihn finden können. Siehe link:#/contributing[Mitwirken] für den vollständigen Beitrags-Workflow. 
+ +== Siehe auch + +* link:#/socratic-recovery-skill[Socratic Code-Theory Recovery Skill] -- verpackt den Brownfield-Dokumentations-Recovery-Workflow als installierbaren Skill, der jede Aussage als aus-Code-ableitbar oder ans-Team-offen klassifiziert diff --git a/docs/brownfield-workflow.adoc b/docs/brownfield-workflow.adoc index 5201d55..428dba6 100644 --- a/docs/brownfield-workflow.adoc +++ b/docs/brownfield-workflow.adoc @@ -59,6 +59,11 @@ Before changing anything, you need to recover the "theory" of the bounded contex This phase uses *Socratic Code-Theory Recovery*: a two-phase workflow that builds understanding through recursive question refinement before producing documentation. +[TIP] +==== +The prompts in this phase are also packaged as an installable Claude Code Skill. See link:#/socratic-recovery-skill[the Socratic Code-Theory Recovery Skill page] for installation instructions across Claude Code, Codex, Cursor, GitHub Copilot, Gemini CLI, and Amazon Kiro. +==== + === Phase 1: Build the Question Tree Start with five high-level questions about the bounded context and decompose them recursively. Use Semantic Anchors as decomposition guides: *arc42* for architecture, *Cockburn Use Cases* for specification, *ISO 25010* for quality, *Nygard ADRs* for decisions. diff --git a/docs/brownfield-workflow.de.adoc b/docs/brownfield-workflow.de.adoc index 364a1a6..d82e728 100644 --- a/docs/brownfield-workflow.de.adoc +++ b/docs/brownfield-workflow.de.adoc @@ -57,6 +57,11 @@ Vor jeder Änderung muss die "Theorie" des Bounded Context rekonstruiert werden Diese Phase nutzt *Socratic Code-Theory Recovery*: einen zweiphasigen Workflow, der Verständnis durch rekursive Frageverfeinerung aufbaut, bevor Dokumentation erzeugt wird. +[TIP] +==== +Die Prompts dieser Phase sind auch als installierbarer Claude Code Skill verpackt. 
Installations-Anleitungen für Claude Code, Codex, Cursor, GitHub Copilot, Gemini CLI und Amazon Kiro auf der link:#/socratic-recovery-skill[Skill-Seite Socratic Code-Theory Recovery]. +==== + === Phase 1: Question Tree aufbauen Mit fünf Top-Level-Fragen zum Bounded Context starten und sie rekursiv zerlegen. Semantic Anchors als Zerlegungs-Heuristiken einsetzen: *arc42* für Architektur, *Cockburn Use Cases* für Spezifikation, *ISO 25010* für Qualität, *Nygard ADRs* für Entscheidungen. diff --git a/docs/socratic-recovery-skill.adoc b/docs/socratic-recovery-skill.adoc new file mode 100644 index 0000000..892f4b7 --- /dev/null +++ b/docs/socratic-recovery-skill.adoc @@ -0,0 +1,206 @@ += Skill: Socratic Code-Theory Recovery +:toc: + +The Semantic Anchors project ships a **Claude Code Skill** that packages the link:#/brownfield[brownfield workflow] as an installable artifact. Once installed, the skill guides any compatible AI coding assistant through the two-phase recovery of a program's "theory" (Naur 1985) from source code. + +== What it does + +Recovers documentation from a brownfield codebase without hallucinating the parts the code cannot tell you. The skill enforces an auditable separation between *code-derivable facts* and *open questions* that must be answered by humans. + +=== Phase 1 — Build the Question Tree + +The skill prompts the LLM to recursively decompose five questions about a bounded context (Problem/Users, Specification, Architecture, Quality Goals, Risks). Each leaf is classified: + +* `[ANSWERED]` -- the LLM found it in the code, with `:` evidence +* `[OPEN]` -- the answer is not derivable from code; tagged with a Category and the role that must answer (Product Owner, Architect, Developer, Domain Expert, Operations) + +Outputs two AsciiDoc files: `QUESTION_TREE.adoc` (full reasoning trace) and `OPEN_QUESTIONS.adoc` (handoff, grouped by role). + +=== Between phases — Team answers the OPEN leaves + +`OPEN_QUESTIONS.adoc` is routed to humans by role. 
They write answers directly into the file. Deferred questions get an explicit `(deferred)` marker, not invention. + +=== Phase 2 — Synthesize documentation + +The skill takes the answered tree and produces a PRD, Cockburn use cases, an arc42 architecture document, and Nygard ADRs with Pugh matrices. Every claim cites a Q-ID; team-supplied facts are marked `(team answer)`. + +== When to use it + +Use this skill when: + +* Documentation is missing, outdated, or untrusted, and a change is about to be made. +* You want documentation that an auditor or new team member can trust -- every claim traces back to either code or a named team answer. +* You want to surface the *open questions* in the system, not just synthesize plausible-sounding prose over them. + +Do **not** use it when: + +* You are doing greenfield development -- use the link:#/spec-driven-development[spec-driven workflow] instead. +* You want to reverse-engineer the whole system at once -- the skill is designed to work one bounded context at a time. +* The code is not runnable -- fix that first. + +== Installation + +The skill follows the https://agentskills.io[agentskills.io] specification. Reference it from your project's instruction file for any compatible AI tool. + +=== link:https://docs.anthropic.com/en/docs/claude-code/memory[Claude Code] + +Reference the skill directly in `CLAUDE.md`: + +[source,markdown] +---- +## Skills + +Use the socratic-code-theory-recovery skill from +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery +when recovering documentation from a brownfield bounded context. 
+ +Phase 1 prompt: +https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md + +Phase 2 prompt: +https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md +---- + +Or place the `skill/socratic-code-theory-recovery/SKILL.md` in your project's skills directory if your Claude Code setup uses a custom skills location. + +=== link:https://openai.com/codex/[Codex] + +Codex supports `AGENTS.md` for repository instructions: + +[source,markdown] +---- +## Documentation Recovery + +When working on a brownfield bounded context without documentation, use +the Socratic Code-Theory Recovery skill: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +The skill enforces a two-phase workflow: build a Question Tree first +([ANSWERED] with code evidence vs [OPEN] with role), let the team answer +the OPEN leaves, then synthesize documentation with full Q-ID traceability. +---- + +=== link:https://github.com/google-gemini/gemini-cli[Gemini CLI] + +Add to `GEMINI.md`: + +[source,markdown] +---- +## Brownfield Documentation Recovery + +For recovering documentation from existing code, follow the +Socratic Code-Theory Recovery workflow: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +Build a Question Tree before writing any documentation. Mark each leaf +[ANSWERED] (with file:line evidence) or [OPEN] (with Category and Ask role). +Synthesize docs from the answered tree only after the team has filled in +the OPEN leaves. Cite Q-IDs in every claim. 
+---- + +=== link:https://docs.cursor.com/[Cursor] + +Add to `.cursor/rules` or `.cursorrules` in your project: + +[source,markdown] +---- +## Brownfield Documentation Recovery + +When asked to document an existing module without docs, use the +Socratic Code-Theory Recovery workflow: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +Build a Question Tree first. Each leaf must be [ANSWERED] (with code +evidence) or [OPEN] (with Category and Ask role). Do not write +documentation until the team has answered the [OPEN] leaves. +---- + +=== link:https://docs.github.com/copilot[GitHub Copilot] + +Add to `.github/copilot-instructions.md`: + +[source,markdown] +---- +## Brownfield Recovery + +For brownfield documentation tasks, follow the Socratic Code-Theory +Recovery workflow at +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +Two phases: first a Question Tree separating code-derivable facts from +open questions routed by role; second, synthesis with Q-ID traceability +after the team fills the gaps. +---- + +=== link:https://kiro.dev/[Amazon Kiro] + +Kiro builds on spec-driven development; this skill is the brownfield counterpart. Add to your project's `specs/` directory or to a spec file: + +[source,markdown] +---- +## Brownfield Documentation Recovery (Spec Onboarding) + +When onboarding an existing bounded context that has no spec, use the +Socratic Code-Theory Recovery skill: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +The skill produces a Question Tree that classifies every claim as +[ANSWERED] (code evidence) or [OPEN] (role-routed). The synthesized +outputs are compatible with Kiro's spec format: a PRD, Cockburn use +cases (User Goal level), an arc42 architecture description, and Nygard +ADRs with Pugh matrices. Use these as the starting point for the +generated spec. 
+---- + +=== Other AI tools + +Any AI assistant that accepts a system prompt or custom instructions can use this skill. Point it to: + +* `SKILL.md` (overview) -- https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/SKILL.md +* Phase 1 prompt -- https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md +* Phase 2 prompt -- https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md + +== What's inside the skill + +[cols="1,3"] +|=== +| File | Role + +| `SKILL.md` +| Frontmatter, when-to-use, two-phase workflow overview, what the LLM can and cannot recover, drift handling + +| `prompts/phase-1-question-tree.md` +| The copy-paste Phase 1 prompt plus post-prompt sanity-check and team-routing instructions + +| `prompts/phase-2-synthesize.md` +| The Phase 2 prompt producing PRD, Cockburn use cases, arc42, and Nygard ADRs + +| `references/arc42.md` +| arc42's 12 sections as Q3 decomposition heuristic + +| `references/cockburn-use-cases.md` +| Fully Dressed fields as Q2 sub-questions; persona vs system use cases + +| `references/iso-25010.md` +| 8 quality characteristics as Q4 sub-questions; mechanism-vs-target split + +| `references/nygard-adrs.md` +| ADR fields as Q3.9 sub-tree; what makes a decision architecturally significant; Pugh-matrix guidance + +| `references/output-schema.md` +| Strict format for `QUESTION_TREE.adoc` and `OPEN_QUESTIONS.adoc`; Q-ID scheme; `[ANSWERED]`/`[OPEN]` block formats; Phase 2 traceability rules + +| `references/examples.md` +| Worked `[ANSWERED]` and `[OPEN]` leaves for each major branch (Q1-Q5) +|=== + +== Further reading + +* link:#/brownfield[Brownfield Workflow] -- the full methodology that this skill packages +* link:#/brownfield-experiment-report[Brownfield Experiment Report] -- controlled experiment behind the methodology +* link:#/brownfield-fair-comparison[Fair 
Comparison Report] -- three recovery approaches with identical team answers +* Peter Naur, "Programming as Theory Building" (1985) -- https://pages.cs.wisc.edu/~remzi/Naur.pdf + +== See also + +* link:#/agentskill[Semantic Anchor Translator Skill] -- recognises verbose concept descriptions and suggests the established anchor term diff --git a/docs/socratic-recovery-skill.de.adoc b/docs/socratic-recovery-skill.de.adoc new file mode 100644 index 0000000..6625ad3 --- /dev/null +++ b/docs/socratic-recovery-skill.de.adoc @@ -0,0 +1,206 @@ += Skill: Socratic Code-Theory Recovery +:toc: + +Das Semantic-Anchors-Projekt liefert einen **Claude Code Skill** aus, der den link:#/brownfield[Brownfield-Workflow] als installierbares Artefakt verpackt. Einmal installiert, führt der Skill einen kompatiblen AI-Coding-Assistenten durch die zweiphasige Wiederherstellung der "Theorie" eines Programms (Naur 1985) aus dem Quellcode. + +== Was er macht + +Stellt Dokumentation aus einem Brownfield-Codebase wieder her, ohne die Lücken zu halluzinieren, die der Code nicht beantworten kann. Der Skill erzwingt eine prüfbare Trennung zwischen *aus Code ableitbaren Fakten* und *offenen Fragen*, die Menschen beantworten müssen. + +=== Phase 1 — Question Tree aufbauen + +Der Skill weist das LLM an, fünf Fragen zum Bounded Context (Problem/User, Spezifikation, Architektur, Qualitätsziele, Risiken) rekursiv zu zerlegen. Jedes Blatt wird klassifiziert: + +* `[ANSWERED]` -- das LLM hat es im Code gefunden, mit `:`-Evidenz +* `[OPEN]` -- die Antwort steckt nicht im Code; mit Category und der Rolle markiert, die antworten muss (Product Owner, Architect, Developer, Domain Expert, Operations) + +Output sind zwei AsciiDoc-Dateien: `QUESTION_TREE.adoc` (vollständige Begründungs-Spur) und `OPEN_QUESTIONS.adoc` (Handoff, nach Rolle gruppiert). + +=== Zwischen den Phasen — Team beantwortet die OPEN-Leafs + +`OPEN_QUESTIONS.adoc` wird rollenweise an Menschen geleitet. Sie schreiben Antworten direkt in die Datei. 
Verschobene Fragen bekommen einen expliziten `(deferred)`-Marker, keine Erfindung. + +=== Phase 2 — Dokumentation synthetisieren + +Der Skill nimmt den beantworteten Baum und erzeugt ein PRD, Cockburn Use Cases, eine arc42-Architekturbeschreibung und Nygard-ADRs mit Pugh-Matrix. Jede Aussage zitiert eine Q-ID; team-gegebene Fakten sind mit `(team answer)` markiert. + +== Wann zu verwenden + +Den Skill verwenden, wenn: + +* Dokumentation fehlt, veraltet ist oder nicht vertrauenswürdig, und eine Änderung ansteht. +* Du Dokumentation willst, der ein Auditor oder neues Team-Mitglied trauen kann -- jede Aussage führt zurück entweder auf Code oder auf eine benannte Team-Antwort. +* Du die *offenen Fragen* im System sichtbar machen willst, statt sie mit plausibel klingendem Text zu überschreiben. + +**Nicht** verwenden, wenn: + +* Du Greenfield-Entwicklung machst -- dafür den link:#/spec-driven-development[Spec-Driven-Workflow]. +* Du das ganze System auf einmal reverse-engineeren willst -- der Skill ist auf einen Bounded Context nach dem anderen ausgelegt. +* Der Code nicht lauffähig ist -- das zuerst beheben. + +== Installation + +Der Skill folgt der https://agentskills.io[agentskills.io]-Spezifikation. Verweise aus der Instruction-Datei deines Projekts auf den Skill, je nach AI-Tool: + +=== link:https://docs.anthropic.com/en/docs/claude-code/memory[Claude Code] + +Skill in `CLAUDE.md` referenzieren: + +[source,markdown] +---- +## Skills + +Use the socratic-code-theory-recovery skill from +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery +when recovering documentation from a brownfield bounded context. 
+ +Phase 1 prompt: +https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md + +Phase 2 prompt: +https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md +---- + +Alternativ kann `skill/socratic-code-theory-recovery/SKILL.md` ins Skills-Verzeichnis deines Projekts gelegt werden, wenn die Claude-Code-Konfiguration einen eigenen Skills-Pfad nutzt. + +=== link:https://openai.com/codex/[Codex] + +Codex unterstützt `AGENTS.md` für Repo-Anweisungen: + +[source,markdown] +---- +## Documentation Recovery + +When working on a brownfield bounded context without documentation, use +the Socratic Code-Theory Recovery skill: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +The skill enforces a two-phase workflow: build a Question Tree first +([ANSWERED] with code evidence vs [OPEN] with role), let the team answer +the OPEN leaves, then synthesize documentation with full Q-ID traceability. +---- + +=== link:https://github.com/google-gemini/gemini-cli[Gemini CLI] + +In `GEMINI.md` ergänzen: + +[source,markdown] +---- +## Brownfield Documentation Recovery + +For recovering documentation from existing code, follow the +Socratic Code-Theory Recovery workflow: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +Build a Question Tree before writing any documentation. Mark each leaf +[ANSWERED] (with file:line evidence) or [OPEN] (with Category and Ask role). +Synthesize docs from the answered tree only after the team has filled in +the OPEN leaves. Cite Q-IDs in every claim. 
+---- + +=== link:https://docs.cursor.com/[Cursor] + +In `.cursor/rules` oder `.cursorrules` ergänzen: + +[source,markdown] +---- +## Brownfield Documentation Recovery + +When asked to document an existing module without docs, use the +Socratic Code-Theory Recovery workflow: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +Build a Question Tree first. Each leaf must be [ANSWERED] (with code +evidence) or [OPEN] (with Category and Ask role). Do not write +documentation until the team has answered the [OPEN] leaves. +---- + +=== link:https://docs.github.com/copilot[GitHub Copilot] + +In `.github/copilot-instructions.md` ergänzen: + +[source,markdown] +---- +## Brownfield Recovery + +For brownfield documentation tasks, follow the Socratic Code-Theory +Recovery workflow at +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +Two phases: first a Question Tree separating code-derivable facts from +open questions routed by role; second, synthesis with Q-ID traceability +after the team fills the gaps. +---- + +=== link:https://kiro.dev/[Amazon Kiro] + +Kiro setzt auf Spec-Driven Development auf; dieser Skill ist das Brownfield-Pendant. Im `specs/`-Verzeichnis des Projekts oder in einer Spec-Datei ergänzen: + +[source,markdown] +---- +## Brownfield Documentation Recovery (Spec Onboarding) + +When onboarding an existing bounded context that has no spec, use the +Socratic Code-Theory Recovery skill: +https://github.com/LLM-Coding/Semantic-Anchors/tree/main/skill/socratic-code-theory-recovery + +The skill produces a Question Tree that classifies every claim as +[ANSWERED] (code evidence) or [OPEN] (role-routed). The synthesized +outputs are compatible with Kiro's spec format: a PRD, Cockburn use +cases (User Goal level), an arc42 architecture description, and Nygard +ADRs with Pugh matrices. Use these as the starting point for the +generated spec. 
+---- + +=== Andere AI-Tools + +Jeder Assistent, der einen System-Prompt oder Custom Instructions akzeptiert, kann den Skill nutzen. Verweise auf: + +* `SKILL.md` (Übersicht) -- https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/SKILL.md +* Phase-1-Prompt -- https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-1-question-tree.md +* Phase-2-Prompt -- https://github.com/LLM-Coding/Semantic-Anchors/blob/main/skill/socratic-code-theory-recovery/prompts/phase-2-synthesize.md + +== Was im Skill steckt + +[cols="1,3"] +|=== +| Datei | Funktion + +| `SKILL.md` +| Frontmatter, When-to-use, Zwei-Phasen-Workflow, was das LLM rekonstruieren kann und was nicht, Drift-Handling + +| `prompts/phase-1-question-tree.md` +| Der Copy-paste Phase-1-Prompt plus Post-Prompt-Sanity-Check und Team-Routing-Anweisungen + +| `prompts/phase-2-synthesize.md` +| Der Phase-2-Prompt, der PRD, Cockburn Use Cases, arc42 und Nygard-ADRs erzeugt + +| `references/arc42.md` +| arc42 12 Sektionen als Q3-Zerlegungs-Heuristik + +| `references/cockburn-use-cases.md` +| Fully-Dressed-Felder als Q2-Subfragen; Persona- vs. 
System-Use-Cases + +| `references/iso-25010.md` +| 8 Qualitätsmerkmale als Q4-Subfragen; Mechanismus-vs-Target-Trennung + +| `references/nygard-adrs.md` +| ADR-Felder als Q3.9-Sub-Tree; was eine Entscheidung architektonisch signifikant macht; Pugh-Matrix-Leitfaden + +| `references/output-schema.md` +| Striktes Format für `QUESTION_TREE.adoc` und `OPEN_QUESTIONS.adoc`; Q-ID-Schema; `[ANSWERED]`/`[OPEN]`-Blockformate; Phase-2-Traceability-Regeln + +| `references/examples.md` +| Worked `[ANSWERED]` und `[OPEN]` Leaves für jeden Hauptast (Q1-Q5) +|=== + +== Weiterführende Literatur + +* link:#/brownfield[Brownfield Workflow] -- die volle Methodik, die dieser Skill verpackt +* link:#/brownfield-experiment-report[Brownfield Experiment Report] -- kontrolliertes Experiment hinter der Methodik +* link:#/brownfield-fair-comparison[Fair Comparison Report] -- drei Recovery-Ansätze mit identischen Team-Antworten +* Peter Naur, "Programming as Theory Building" (1985) -- https://pages.cs.wisc.edu/~remzi/Naur.pdf + +== Siehe auch + +* link:#/agentskill[Semantic Anchor Translator Skill] -- erkennt umschriebene Konzeptbeschreibungen und schlägt den etablierten Anchor-Term vor diff --git a/docs/spec-driven-workflow.adoc b/docs/spec-driven-workflow.adoc index fcba182..54213e2 100644 --- a/docs/spec-driven-workflow.adoc +++ b/docs/spec-driven-workflow.adoc @@ -4,6 +4,11 @@ Ralf D. Müller :toc: :toc-placement: preamble +[NOTE] +==== +*Working on an existing codebase?* This page describes the greenfield workflow — starting from a blank slate. For brownfield projects (existing code, missing or untrusted documentation), see the link:#/brownfield[Brownfield Workflow] and the installable link:#/socratic-recovery-skill[Socratic Code-Theory Recovery Skill]. +==== + == Introduction This document describes how to build production-quality software with AI agents, guided by link:#/[Semantic Anchors]. 
diff --git a/docs/spec-driven-workflow.de.adoc b/docs/spec-driven-workflow.de.adoc index a89b9c5..25b0475 100644 --- a/docs/spec-driven-workflow.de.adoc +++ b/docs/spec-driven-workflow.de.adoc @@ -6,6 +6,11 @@ :toclevels: 3 :source-highlighter: highlight.js +[NOTE] +==== +*Bestehender Codebase?* Diese Seite beschreibt den Greenfield-Workflow — Start auf der grünen Wiese. Für Brownfield-Projekte (existierender Code, fehlende oder unzuverlässige Dokumentation) siehe den link:#/brownfield[Brownfield-Workflow] und den installierbaren link:#/socratic-recovery-skill[Socratic Code-Theory Recovery Skill]. +==== + == Einleitung Dieses Dokument beschreibt, wie man mit KI-Agenten produktionsreife Software entwickelt, gesteuert durch link:#/[Semantic Anchors]. diff --git a/scripts/prerender-routes.js b/scripts/prerender-routes.js index f6bd4b3..affda00 100644 --- a/scripts/prerender-routes.js +++ b/scripts/prerender-routes.js @@ -72,6 +72,13 @@ const ROUTES = [ description: 'Three approaches (Direct, Socratic, Two-Phase) compared with identical team answers. Measures the structural value of the Question Tree, not the answers.', }, + { + path: '/socratic-recovery-skill', + fragment: 'docs/socratic-recovery-skill.html', + title: 'Socratic Code-Theory Recovery Skill — Semantic Anchors', + description: + 'Installable Claude Code Skill that packages the brownfield documentation-recovery workflow. Two-phase Question Tree with [ANSWERED]/[OPEN] leaves, Q-ID traceability. 
Install on Claude Code, Codex, Cursor, GitHub Copilot, Gemini CLI, and Amazon Kiro.', + }, { path: '/contracts', fragment: 'docs/contracts.html', diff --git a/scripts/render-docs.js b/scripts/render-docs.js index e4f1b3c..4cbe5d5 100644 --- a/scripts/render-docs.js +++ b/scripts/render-docs.js @@ -103,6 +103,15 @@ renderFile( path.join(WEB_DOCS, 'brownfield-fair-comparison.html') ) +renderFile( + path.join(ROOT, 'docs/socratic-recovery-skill.adoc'), + path.join(WEB_DOCS, 'socratic-recovery-skill.html') +) +renderFile( + path.join(ROOT, 'docs/socratic-recovery-skill.de.adoc'), + path.join(WEB_DOCS, 'socratic-recovery-skill.de.html') +) + renderFile( path.join(ROOT, 'docs/anchor-evaluations.adoc'), path.join(WEB_DOCS, 'anchor-evaluations.html') diff --git a/website/src/main.js b/website/src/main.js index 72e15e5..b474e79 100644 --- a/website/src/main.js +++ b/website/src/main.js @@ -151,6 +151,7 @@ function initApp() { addRoute('/brownfield', renderBrownfieldPage) addRoute('/brownfield-experiment-report', renderBrownfieldExperimentReportPage) addRoute('/brownfield-fair-comparison', renderBrownfieldFairComparisonPage) + addRoute('/socratic-recovery-skill', renderSocraticRecoverySkillPage) addRoute('/contracts', renderContractsPageHandler) addRoute('/evaluations', renderEvaluationsPage) @@ -297,6 +298,15 @@ function renderBrownfieldFairComparisonPage() { loadDocContent('docs/brownfield-fair-comparison.adoc') } +function renderSocraticRecoverySkillPage() { + const pageContent = document.getElementById('page-content') + if (!pageContent) return + + pageContent.innerHTML = renderDocPage() + updateActiveNavLink() + loadDocContent('docs/socratic-recovery-skill.adoc') +} + function renderContractsPageHandler() { const pageContent = document.getElementById('page-content') if (!pageContent) return diff --git a/website/src/utils/router.js b/website/src/utils/router.js index 20fa8f1..0e9e2ee 100644 --- a/website/src/utils/router.js +++ b/website/src/utils/router.js @@ -20,6 
+20,7 @@ const ROUTE_TITLES = { '/brownfield': 'Brownfield Workflow — Semantic Anchors', '/brownfield-experiment-report': 'Brownfield Experiment 1a Report — Semantic Anchors', '/brownfield-fair-comparison': 'Brownfield Fair Comparison — Semantic Anchors', + '/socratic-recovery-skill': 'Socratic Code-Theory Recovery Skill — Semantic Anchors', '/evaluations': 'Evaluations — Semantic Anchors', '/contributing': 'Contributing — Semantic Anchors', '/changelog': 'Changelog — Semantic Anchors',