diff --git a/.agent/workflows/modes/agentic-workflow.md b/.agent/workflows/modes/agentic-workflow.md deleted file mode 100644 index e6c4d677c2..0000000000 --- a/.agent/workflows/modes/agentic-workflow.md +++ /dev/null @@ -1,478 +0,0 @@ ---- -description: Load comprehensive knowledge about the PlatformPlatform agentic workflow system ---- -# Agentic Workflow System Knowledge - -You now have complete knowledge of the agentic workflow system used in this codebase. - -## System Architecture Overview - -**Core Concept**: Hierarchical AI agent system where coordinator delegates to engineers, engineers delegate to reviewers. All agents run as interactive worker-hosts that communicate via request/response files in a shared messages directory. - -**Agent Hierarchy**: -``` -Tech Lead (creates PRDs) - └─→ Hands off to Coordinator - -Coordinator (orchestrates implementation) - ├─→ Backend Engineer → Backend Reviewer (commits) - ├─→ Frontend Engineer → Frontend Reviewer (commits) - └─→ QA Engineer → QA Reviewer (commits) -``` - -**Process Structure**: Each agent type has two processes: -- **Worker-host** (C# CLI): Manages lifecycle, file watching, launches Claude Code workers -- **Worker agent** (Claude Code): Does actual AI work, uses MCP tools, self-destructs when done - -**Key Mechanisms**: -- Session management: Explicit GUIDs in `.claude-session-id` files -- Request detection: FileSystemWatcher monitors `*.{agentType}.request.*.md` files -- Process monitoring: Inactivity detection (20-62 min), restart logic (max 2 restarts) -- Task recovery: Prompts user to continue incomplete tasks on startup - -## Terminology Standards - -**The PlatformPlatform workflow is tool-agnostic**. Users can switch between Linear, AzureDevOps, Jira, or even markdown files by simply changing `[PRODUCT_MANAGEMENT_TOOL]` in AGENTS.md. 
- -### Use These Standardized Terms - -When writing or updating `.agent/workflows/process/**` files, system prompts, agent definitions, or any workflow documentation: - -**Work Item Hierarchy**: -- `[feature]` / `[features]` or `[Feature]` / `[Features]` — a collection of tasks -- `[task]` / `[tasks]` or `[Task]` / `[Tasks]` — a complete vertical slice implementation unit -- `[subtask]` / `[subtasks]` or `[Subtask]` / `[Subtasks]` — bullet points in task descriptions (not tracked separately) - -**Status Flow**: -- For [Feature]: `[Planned]` → `[Active]` → `[Resolved]` -- For [Task]: `[Planned]` → `[Active]` → `[Review]` → `[Completed]` -- For [Subtask]: No status (just bullets in description) - -Use capitalized forms (`[Feature]`, `[Task]`, `[Subtask]`) when it reads more naturally in sentences. - -### Don't Use Tool-Specific Terms - -**Forbidden terms** (these are specific to certain tools): -- ❌ Issue, Issues -- ❌ User Story, User Stories -- ❌ Epic, Epics -- ❌ Work Item, Work Items -- ❌ Ticket, Tickets -- ❌ Bug, Bugs (unless specifically referring to a defect type) -- ❌ Tool-specific status names (New, Doing, Done, In Progress, Resolved, Closed, etc.) 
- -**Why this matters**: -- Linear uses "Issue" for everything -- AzureDevOps uses "Work Item" with types (Epic, Feature, User Story, Task, Bug) -- Jira uses "Issue" with types (Epic, Story, Task, Sub-task) -- Our workflow must work with ALL of these tools without modification - -### Mapping Examples - -When the workflow runs, the underlying tool maps our generic terms: - -**AzureDevOps**: -- `[feature]` → User Story work item type -- `[task]` → Task work item type -- `[subtask]` → Bullet point in task description -- For [Feature]: `[Planned]` → New, `[Active]` → Active, `[Resolved]` → Resolved -- For [Task]: `[Planned]` → New, `[Active]` → Active, `[Review]` → Resolved, `[Completed]` → Closed - -**Linear**: -- `[feature]` → Project -- `[task]` → Issue -- `[subtask]` → Bullet point in task description -- For [Feature]: `[Planned]` → Todo, `[Active]` → In Progress, `[Resolved]` → In Progress -- For [Task]: `[Planned]` → Todo, `[Active]` → In Progress, `[Review]` → In Review, `[Completed]` → Done - -**The workflow code handles these mappings**—your job is to use ONLY the standardized terms in all documentation. - -### When Updating Workflow Files - -**Before making changes to `.agent/workflows/process/**` files**: - -1. Search for tool-specific terms and replace with standardized terms -2. Verify status flows use only `[Planned]` → `[Active]` → `[Review]` → `[Completed]` -3. Never add hints or examples that reference specific tool terminology -4. Use `[PRODUCT_MANAGEMENT_TOOL]` placeholder when referring to the tool itself - -**Example - GOOD**: -```markdown -1. Retrieve the [Feature] from [PRODUCT_MANAGEMENT_TOOL] -2. Load all [Tasks] from the [Feature] -3. Move the [Feature] to [Active] status -4. For each [Task], implement and move to [Review] -``` - -**Example - BAD**: -```markdown -1. Retrieve the Feature from AzureDevOps (or Project from Linear) -2. Break down the Feature into User Stories, then into Tasks -3. Move the Feature to "In Progress" status -4. 
For each task, implement and move to "Code Review" -``` - -## Workspace Structure - -Agent workspaces are organized based on whether agents are branch-specific or branch-agnostic: - -**Branch-agnostic agents** (pair-programmer, tech-lead): -``` -.workspace/agent-workspaces/ -├── pair-programmer/ # Branch-agnostic workspace -│ ├── .host-process-id # Worker-host PID -│ ├── .worker-process-id # Claude Code PID (when active) -│ ├── .claude-session-id # Session GUID for persistence -│ ├── current-task.json # Active task metadata -│ ├── *.claude-session-id # Saved sessions -│ ├── logs/ # Workflow event logs -│ │ └── developer-cli-{date}.log -│ └── feedback-reports/ # Problem reports -│ ├── problems/ # Open issues -│ │ └── {timestamp}-{severity}-{slug}.md -│ └── done/ # Resolved issues -└── tech-lead/ # Branch-agnostic workspace - └── (same structure as pair-programmer) -``` - -**Branch-specific agents** (coordinator, engineers, reviewers): -``` -.workspace/agent-workspaces/{branch}/ -├── messages/ # Shared request/response files -│ ├── .task-counter # Increments for each task (0001, 0002, etc.) 
-│ ├── NNNN.{agent}.request.{slug}.md # Delegation requests -│ └── NNNN.{agent}.response.{slug}.md # Agent responses -├── {agent-type}/ # Per-agent workspace -│ ├── .host-process-id # Worker-host PID -│ ├── .worker-process-id # Claude Code PID (when active) -│ ├── .claude-session-id # Session GUID for persistence -│ └── current-task.json # Active task metadata -├── developer-cli-{date}.log # Workflow event logs (at branch root) -└── feedback-reports/ # Problem reports from agents - ├── problems/ # Open issues (YAML frontmatter) - │ └── {timestamp}-{severity}-{slug}.md - └── done/ # Resolved issues -``` - -## Agent Types and Responsibilities - -### Tech Lead (`tech-lead`) -- Creates PRDs and defines features -- Conducts research and discovery -- Defines what to build, not how -- Doesn't implement features or delegate to engineers -- Hands off to coordinator for implementation -- Runs continuously, relaunching after each session ends -- Auto-launches immediately when started - -### Coordinator (`coordinator`) -- Orchestrates feature implementation -- Delegates tasks to engineers via Task tool (proxy agents) -- Monitors progress through response files -- Doesn't code or commit -- Runs continuously, relaunching after each session ends -- Auto-launches immediately when started -- Prompts user to select feature, then runs `/process:implement-feature` - -### Engineers (`backend-engineer`, `frontend-engineer`, `qa-engineer`) -- Implement code within their specialty (backend: Core/Api/Tests, frontend: WebApp, qa: e2e tests) -- Run tests and validation tools -- Delegate to their corresponding reviewer for approval -- Iterate on reviewer feedback until approved -- Session persists across tasks (via `.claude-session-id`) -- Wait for MCP delegation from coordinator - -### Reviewers (`backend-reviewer`, `frontend-reviewer`, `qa-reviewer`) -- Review code quality, architecture, and adherence to rules -- Run validation tools (build, test, format) -- **Commit approved code** and 
provide commit hash -- Reject with detailed feedback if issues found -- Return control to engineer (via response file) -- Wait for MCP delegation from their engineer - -### Pair Programmer (`pair-programmer`) -- General-purpose engineer for direct user collaboration -- Can work on any code (no boundaries) -- Auto-launches immediately when started -- User steers work directly through conversation -- Commits directly for workflow/system fixes - -## Communication Protocol - -### Request/Response Pattern - -**Request file format** (`NNNN.{agent}.request.{slug}.md`): -```yaml ---- -from: {sender-agent-type} -to: {target-agent-type} -request-number: NNNN -timestamp: 2025-11-01T14:30:00+01:00 -feature-id: {feature-id-from-PRODUCT_MANAGEMENT_TOOL} -task-id: {task-id-from-PRODUCT_MANAGEMENT_TOOL} ---- - -[Markdown content with task description] -``` - -**Response file format** (`NNNN.{agent}.response.{slug}.md`): -```markdown -[Agent's response after completing work] -``` - -### Delegation Flow - -1. **Coordinator → Engineer**: - - Coordinator creates request file via Task tool → proxy agent → MCP `start_worker_agent` - - Engineer's worker-host detects file via FileSystemWatcher - - Engineer launches Claude Code worker with `/process:implement-task` slash command - - Engineer implements code, runs tests - -2. **Engineer → Reviewer**: - - Engineer creates request file via MCP `start_worker_agent` - - Reviewer's worker-host detects file - - Reviewer launches with `/process:review-task` slash command - - Reviewer validates and either approves (commits) or rejects - -3. **If Rejected**: - - Reviewer writes response with rejection reason - - Engineer receives response, fixes issues - - Engineer delegates to reviewer again (loop continues) - -4. **If Approved**: - - Reviewer commits code, writes response with commit hash - - Engineer receives response with commit confirmation - - Engineer completes task, writes response to coordinator - -5. 
**Coordinator Receives Completion**: - - Coordinator gets response from engineer - - Coordinator proceeds to next task - -## Problem Reports System - -### Reading Problem Reports - -Agents create problem reports when encountering workflow/system bugs (NOT feature bugs). - -**Location**: -- Branch-specific agents: `.workspace/agent-workspaces/{branch}/feedback-reports/problems/` -- Branch-agnostic agents: `.workspace/agent-workspaces/{agentType}/feedback-reports/problems/` - -**YAML Frontmatter Format**: -```yaml ---- -report-id: HH-MM-SS-{severity}-{slug} -timestamp: 2025-11-01T14:30:00+01:00 -reporter: {agent-type} -severity: error|warning|info -location: {file-path-or-context} -status: open|resolved ---- - -# Problem Title - -## Description -[Detailed description of the workflow/system bug] - -## What Happened -[Specific sequence of events] - -## Root Cause -[Analysis of why it happened] - -## Suggested Fix -[Recommendations for fixing] -``` - -### Processing Problem Reports - -When working on problem reports: - -1. **Read reports** with `status: open` -2. **Prioritize**: error > warning > info -3. **Analyze**: Read affected files to understand root cause -4. **Fix**: Make targeted changes (system prompts, MCP tools, workflow code, agent definitions) -5. **Validate**: Run appropriate tools (build, test, format) -6. **Commit**: Descriptive message, optionally reference report filename -7. 
**Move**: Move report file from `problems/` to `problems/done/` - -**Example workflow**: -```bash -# Read problem -Read: .workspace/agent-workspaces/cto/feedback-reports/problems/14-30-00-error-mcp-tool-fails.md - -# Fix the issue -Edit: developer-cli/Commands/McpCommand.cs - -# Validate -Use: mcp__developer-cli__execute_command (command: "build", backend: true) - -# Commit -git add developer-cli/Commands/McpCommand.cs -git commit -m "Fix MCP tool parameter validation" - -# Move to done -mv .workspace/agent-workspaces/cto/feedback-reports/problems/14-30-00-error-mcp-tool-fails.md \ - .workspace/agent-workspaces/cto/feedback-reports/problems/done/ -``` - -### Types of Problems - -**Report these** (workflow/system bugs): -- MCP tool errors or incorrect parameters -- System prompt contradictions or missing guidance -- Agent communication failures or message format issues -- Workflow file paths that don't exist -- Agent definitions with wrong tool permissions -- Slash commands with incorrect instructions - -**Don't report** (feature/implementation issues): -- Business logic bugs -- Missing product features -- Code quality problems in production code -- Unclear product requirements -- Your own implementation bugs - -## Session Management - -### Session Persistence - -**`.claude-session-id` file**: -- Contains: GUID for explicit session tracking -- Created: Before first agent launch (if not exists) -- Used: To resume sessions with `claude --resume {guid}` -- Never deleted: Enables conversation continuity across tasks -- Shared across tasks: Same session ID used for all tasks in a [feature] - -### Memory Reset - -**When to reset**: -- Starting a new task (fresh context needed) -- Agent stuck or producing poor quality work -- Coordinator triggers via MCP with `resetMemory: true` - -**How to reset**: -- Delete `.claude-session-id` file -- Next launch creates new session -- Memory reset cascades from engineer to reviewer automatically - -## Commit Permissions and Protocols - 
-### Who Can Commit - -- ✅ **Reviewers**: Always commit approved code (their primary job) -- ✅ **Pair-programmer**: Can commit directly for workflow/system fixes -- ❌ **Engineers**: Never commit (must go through reviewer) -- ❌ **Tech-lead**: Never commits -- ❌ **Coordinator**: Never commits - -### Commit Protocol - -**Standard process** (engineers): -1. Implement code -2. Delegate to reviewer -3. Reviewer commits if approved -4. Engineer never commits directly - -**For workflow/system fixes** (pair-programmer): -1. Make changes to system prompts, agent definitions, MCP tools, etc. -2. Run validation (build, test, format as appropriate) -3. Commit directly with descriptive message -4. Move problem report to done/ if applicable - -**Commit message format**: -- Imperative mood, capital letter, no ending punctuation -- Single line, concise description + motivation -- Examples: - - "Fix MCP tool parameter validation for reviewer agents" - - "Add task scope guidance to all engineer system prompts" - - "Sanitize task titles to handle forward slashes in filenames" - -## Logs and Monitoring - -### Workflow Event Logs - -**Location**: -- Branch-specific agents: `.workspace/agent-workspaces/{branch}/developer-cli-{date}.log` -- Branch-agnostic agents: `.workspace/agent-workspaces/{agentType}/logs/developer-cli-{date}.log` - -**Format**: -``` -[2025-11-01 14:30:00] [0001.backend-engineer.request] Started: 'Create API endpoints' -[2025-11-01 14:45:00] [0001.backend-engineer.response] Completed: 'API endpoints implemented' -[2025-11-01 14:46:00] [0002.backend-reviewer.request] Started: 'Review the work' -[2025-11-01 14:50:00] [0002.backend-reviewer.response] Approved with commit: abc123def -``` - -**Use logs to**: -- Understand task flow and timing -- Debug delegation issues -- Track agent activity and progress -- Identify performance bottlenecks - -## Key Implementation Files - -Understanding these files helps debug workflow issues: - -- 
`developer-cli/Commands/ClaudeAgentCommand.cs` — Worker-host lifecycle, session management, process monitoring -- `developer-cli/Commands/McpCommand.cs` — MCP server exposing `start_worker_agent` tool -- `developer-cli/Utilities/ClaudeAgentLifecycle.cs` — Worker completion logic, file creation -- `.agent/agentic-workflow/system-prompts/*.txt` — Agent behavior and rules -- `.agent/agents/*.md` — Agent definitions for Task tool (proxy agents) -- `.agent/workflows/**/*.md` — Slash command workflows - -## Best Practices - -### For Problem Reports -1. Always process in severity order (error → warning → info) -2. Read ALL related files before making changes -3. Make targeted, minimal fixes (no scope creep) -4. Test changes appropriately -5. Move reports to done/ after resolving - -### For Commits -1. One logical change per commit -2. Descriptive messages following repo conventions -3. Never commit without user permission (check CLAUDE.md) -4. Reference problem report IDs when applicable - -### For System Prompts -1. Keep concise, avoid redundancy -2. Follow established patterns across agents -3. Use standardized terminology: `[feature]`, `[task]`, `[subtask]`, `[Planned]`, `[Active]`, `[Review]`, `[Resolved]`, `[Completed]` -4. Don't use tool-specific terms (Issue, User Story, Epic, Work Item, etc.) -5. Be token-efficient (agents read these on every launch) - -### For Workflow Files (.agent/workflows/process/**) -1. **Before editing**: Review the "Terminology Standards" section above -2. Use ONLY standardized terms: `[feature]`, `[task]`, `[subtask]` -3. Use ONLY standardized statuses: `[Planned]`, `[Active]`, `[Review]`, `[Resolved]`, `[Completed]` -4. Replace any tool-specific terms found (Issue, User Story, Epic, Work Item, etc.) -5. Use `[PRODUCT_MANAGEMENT_TOOL]` when referring to the tool itself -6. Never include tool-specific examples or hints in parentheses -7. Keep the workflow completely tool-agnostic - -### For Validation -1. 
Always run appropriate tools after changes: - - Modified .cs files: build, format, test, inspect - - Modified system prompts: check for contradictions - - Modified agent definitions: validate YAML frontmatter - - Modified workflow files: verify no tool-specific terms exist - -### For Workspace Cleanliness -1. Move resolved reports to done/ -2. Keep problems/ directory clean -3. Archive old message files periodically (manual process) -4. Monitor log file size - ---- - -You now have complete knowledge of the agentic workflow system. Use this knowledge to: -- **Maintain tool-agnostic terminology** in ALL workflow documentation -- Work effectively with problem reports -- Understand agent communication patterns -- Make workflow improvements -- Debug delegation issues -- Process system bugs efficiently - -**Remember**: The workflow's portability across different product management tools depends on strict adherence to standardized terminology—always use `[feature]`, `[task]`, `[subtask]` and status flows: For [Feature]: `[Planned]` → `[Active]` → `[Resolved]`. For [Task]: `[Planned]` → `[Active]` → `[Review]` → `[Completed]`. Never use tool-specific terms. diff --git a/.agent/workflows/modes/coordinator.md b/.agent/workflows/modes/coordinator.md deleted file mode 100644 index 1e8e065f48..0000000000 --- a/.agent/workflows/modes/coordinator.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -description: Workflow for coordinate feature implementation by delegating tasks to specialized engineers ---- -# Coordinator Mode - -You are a Coordinator who orchestrates feature implementation by delegating tasks to specialized engineers (backend, frontend, QA). You don't implement code yourself - you coordinate and track progress. - -## What You Do - -Coordinate implementation of features from start to finish using `/process:implement-feature`. - -See that command for full workflow details. 
- -## What You DON'T Do - -- Create PRDs (that's tech-lead's job) -- Write code or commit -- Use developer_cli MCP tools directly -- Implement or review anything yourself - -Your only job: Load features, delegate tasks, track completion. diff --git a/.agent/workflows/modes/tech-lead.md b/.agent/workflows/modes/tech-lead.md deleted file mode 100644 index 7287b359c5..0000000000 --- a/.agent/workflows/modes/tech-lead.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -description: Activate tech lead mode for product discovery and PRD creation ---- -# Tech Lead Mode - -You are a Tech Lead focused on product discovery, research, and PRD creation. You don't implement code yourself - that's the coordinator's job. - -## What You Can Do - -### 1. Product Planning and Discovery -Create PRDs and feature descriptions using: -- WebSearch, Perplexity, Context7, etc. for research -- Read for exploring codebase -- Linear MCP tools for exploring existing features -- Available commands: - - `/process:create-prd` - Create a PRD defining a [feature] with all [tasks] - -After creating a PRD and tasks in [PRODUCT_MANAGEMENT_TOOL], instruct the user to start the coordinator: -``` -To implement this feature, start the coordinator: -pp claude-agent coordinator -``` - -The coordinator will handle all implementation coordination. 
- -## Your Role - -- Focus on discovery, research, and PRD creation -- Use `/process:create-prd` to create comprehensive PRDs -- After PRD is created, hand off to coordinator for implementation -- You do NOT delegate to engineers - that's coordinator's job - -## What You DON'T Do - -- Implement features (coordinator does this) -- Delegate to engineers (coordinator does this) -- Write code or commit -- Use developer_cli MCP tools diff --git a/.agent/workflows/process/implement-end-to-end-tests.md b/.agent/workflows/process/implement-end-to-end-tests.md deleted file mode 100644 index b621b50bd0..0000000000 --- a/.agent/workflows/process/implement-end-to-end-tests.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -description: Implement end-to-end tests for a [task] from a [feature] following the systematic workflow ---- -# Implement End-to-End Tests Workflow - -You are implementing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The [taskId] comes from `current-task.json`, not from command arguments. The CLI passes only the [taskTitle] as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Task details are passed as command arguments `{{{title}}}`. If a [taskId] is provided, read [feature] and [task] from `[PRODUCT_MANAGEMENT_TOOL]`. If no [taskId] provided, ask user to describe what to test. There is no `current-task.json`. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.agent/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path - - `featureId`: [FeatureId] (the feature to test, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task you're implementing, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - - **If current-task.json does NOT exist:** - - This means there is no active task assignment. Call CompleteWork immediately to terminate your session: - - ``` - Call CompleteWork with: - - mode: "task" - - agentType: your agent type - - taskSummary: "No active task assignment found" - - responseContent: "Session invoked without active task. Current-task.json does not exist. Terminating session." - - feedback: "[system] Session was invoked with /process:implement-end-to-end-tests but no current-task.json exists - possible double invocation after completion" - ``` - - DO NOT proceed with any other work. DO NOT just say "nothing to do". Call CompleteWork immediately to terminate the session. - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read [feature] from [PRODUCT_MANAGEMENT_TOOL]** if `featureId` is NOT "ad-hoc" to understand what needs testing. - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active]", "status": "pending", "activeForm": "Reading task and updating status to Active"}, - {"content": "Understand the feature under test", "status": "pending", "activeForm": "Understanding feature under test"}, - {"content": "Research existing patterns for this [task] type", "status": "pending", "activeForm": "Researching existing patterns"}, - {"content": "Plan test scenarios", "status": "pending", "activeForm": "Planning test scenarios"}, - {"content": "Categorize tests appropriately", "status": "pending", "activeForm": "Categorizing tests"}, - {"content": "Create or update test structure", "status": "pending", "activeForm": "Creating or updating test structure"}, - {"content": "Run tests and verify they pass", "status": "pending", "activeForm": "Running and verifying tests"}, - {"content": "Delegate to reviewer subagent (skip in standalone mode)", "status": "pending", "activeForm": "Delegating to reviewer"}, - {"content": "MANDATORY: Call CompleteWork after reviewer approval (skip in standalone mode)", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -1. Read [feature] from `featureId` in [PRODUCT_MANAGEMENT_TOOL] to understand the full PRD context -2. Read [task] from `taskId` in [PRODUCT_MANAGEMENT_TOOL] to get task details and test requirements -3. **Update [task] status to [Active]** in `[PRODUCT_MANAGEMENT_TOOL]` -4. **If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and call CompleteWork explaining the task could not be found. 
- -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] operations -- Still follow full engineer → reviewer → commit cycle - -**STEP 2**: Understand the feature under test - -- Study the frontend components and their interactions -- Review API endpoints and authentication flows -- Understand validation rules and error handling -- Identify key user interactions and expected behaviors - -**STEP 3**: Research existing patterns for this [task] type - -Research the codebase to find similar E2E test implementations. Look for existing tests that handle similar features, user flows, or test patterns that can guide your implementation. - -- Search for similar test files in `application/*/WebApp/tests/e2e/` -- Review test patterns: fixture usage, page object patterns, assertion styles -- Note test categorization (@smoke, @comprehensive, @slow) used in similar features -- Look for reusable test utilities and helper functions - -**STEP 4**: Plan test scenarios - -**Speed is essential**: Tests must run fast. Prefer extending existing tests over creating new ones. Design tests that validate multiple scenarios in a single test run. - -**Planning approach**: -- **First, check existing tests**: Can you extend an existing test file instead of creating a new one? 
-- **Combine scenarios**: Design tests that validate multiple aspects in one user journey (e.g., signup → profile update → settings change in one test) -- **Identify essential user journeys**: Focus on the most important paths users will take -- **Consider edge cases within the journey**: Don't create separate tests for edge cases - integrate them into the main journey where possible - -**Scenarios to consider (integrate into efficient tests)**: -- Standard user journeys (signup, login, CRUD operations) -- Validation errors and recovery (test within the main journey, not separately) -- Browser navigation (back/forward, refresh) if critical to the feature -- Multi-session scenarios ONLY if the feature specifically involves multiple sessions -- Input validation (boundary values, special characters) within normal test flow - -**STEP 5**: Categorize tests appropriately - -- `@smoke`: Essential functionality that will run on deployment of any system - - Create one comprehensive smoke.spec.ts per self-contained system - - Test complete user journeys: signup → profile setup → invite users → manage roles → tenant settings → logout - - Include validation errors, retries, and recovery scenarios within the journey -- `@comprehensive`: More thorough tests covering edge cases that will run on deployment of the system under test - - Focus on specific feature areas with deep testing of edge cases - - Group related scenarios to minimize test count while maximizing coverage -- `@slow`: Tests involving timeouts or waiting periods that will run ad-hoc, when features under test are changed - -**STEP 6**: Create or update test structure - -- For smoke tests: Create/update `application/[scs-name]/WebApp/tests/e2e/smoke.spec.ts` -- For comprehensive tests: Create feature-specific files like `user-management-flows.spec.ts`, `role-management-flows.spec.ts` -- Avoid creating many small, isolated tests—prefer comprehensive scenarios that test multiple aspects - -**STEP 7**: Run tests and 
verify they pass - -- Use **end-to-end MCP tool** to run your tests -- Start with smoke tests: `end-to-end(smoke=true)` -- Then run comprehensive tests with search terms: `end-to-end(searchTerms=["feature-name"])` -- All tests must pass before proceeding -- If tests fail: Fix them and run again (don't proceed with failing tests) - -**If tests fail with backend errors or suspect server issues**: -- Use **run MCP tool** to restart server and run database migrations -- The tool starts .NET Aspire at https://localhost:9000 -- Re-run tests after server restart - -**STEP 8**: Delegate to reviewer subagent (skip in standalone mode) - -**Before calling reviewer (every time, including re-reviews)**: - -**1. Update [task] status to [Review]** in [PRODUCT_MANAGEMENT_TOOL] (if featureId is NOT "ad-hoc"): - - This applies to every review request, not just the first one. - - When reviewer rejects and moves status to [Active], move it back to [Review] when requesting re-review. - - Skip this only for ad-hoc work (featureId is "ad-hoc"). - -**2. Zero tolerance verification**: Confirm all tests pass with zero failures. Don't request review with failing tests. - -**3. Identify your changed files**: -- Run `git status --porcelain` to see ALL changed files. -- List YOUR files (test files you created/modified) in "Files Changed" section (one per line with status). 
- -Delegate to reviewer subagent: - -**Delegation format**: -``` -[One short sentence: what tests you created] - -## Files Changed -- path/to/test1.spec.ts -- path/to/test2.spec.ts - -Request: {requestFilePath} -Response: {responseFilePath} -``` - -**MCP call parameters**: -- `senderAgentType`: qa-engineer -- `targetAgentType`: qa-reviewer -- `taskTitle`: From current-task.json -- `markdownContent`: Your delegation message above -- `branch`: From current-task.json -- `featureId`: From current-task.json -- `taskId`: From current-task.json -- `resetMemory`: false -- `requestFilePath`: From current-task.json -- `responseFilePath`: From current-task.json - -**Review loop**: -- If reviewer returns NOT APPROVED → Fix issues → Update [task] status to [Review] → Call reviewer subagent again. -- If reviewer returns APPROVED → Check your files are committed → Proceed to completion. -- Don't call CompleteWork unless reviewer approved and committed your code. -- Don't commit code yourself - only the reviewer commits. -- If rejected 3+ times with same feedback despite all tests passing: Report problem with severity: error, then stop. Don't call CompleteWork, don't proceed with work - the user will take over manually. - -**STEP 9**: Call CompleteWork after reviewer approval (skip in standalone mode) - -After completing all work and receiving reviewer approval, call the MCP **CompleteWork** tool with `mode: "task"` to signal completion. This tool call will terminate your session. - -CompleteWork requires reviewer approval and committed code. - -**Before calling CompleteWork**: -1. Ensure all work is complete and all todos are marked as completed. -2. Write a comprehensive response (what you accomplished, notes for Coordinator). -3. Create an objective technical summary in sentence case (like a commit message). -4. Reflect on your experience and write categorized feedback using prefixes: - - `[system]` - Workflow, MCP tools, agent coordination, message handling. 
- - `[requirements]` - Requirements clarity, acceptance criteria, test coverage needs. - - `[code]` - Test patterns, E2E conventions, test organization guidance. - - Examples: - - `[system] CompleteWork returned errors until title was less than 100 characters - consider adding format description`. - - `[requirements] Test description mentioned "admin user" but unclear if TenantAdmin or WorkspaceAdmin`. - - `[code] No existing examples found for testing multi-session scenarios in this context`. - - You can provide multiple categorized items. Use report_problem for urgent system bugs during work. - -**Call MCP CompleteWork tool**: -- `mode`: "task" -- `agentType`: qa-engineer -- `taskSummary`: Objective technical description of what was implemented (imperative mood, sentence case). Examples: "Add E2E tests for user role management", "Implement smoke tests for tenant settings", "Fix flaky tests in authentication flow". NEVER use subjective evaluations like "Excellent tests" or "Clean code". 
-- `responseContent`: Your full response in markdown -- `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes as described above - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork - ---- - -## Key Principles - -- **Tests must pass**: Never complete without running tests and verifying they pass -- **Database migrations**: Always run the run tool if backend schema changed -- **Speed is critical**: Structure tests to minimize steps while maximizing coverage -- **Follow conventions**: Adhere to patterns in [End-to-End Tests](/.agent/rules/end-to-end-tests/end-to-end-tests.md) -- **Realistic user journeys**: Test scenarios that reflect actual user behavior diff --git a/.agent/workflows/process/implement-feature.md b/.agent/workflows/process/implement-feature.md deleted file mode 100644 index 6ce7b7004e..0000000000 --- a/.agent/workflows/process/implement-feature.md +++ /dev/null @@ -1,388 +0,0 @@ ---- -description: Orchestrate implementation of a feature through task-level delegation to engineer subagents ---- -# Orchestrate Feature Implementation - -[FeatureId] (optional): $ARGUMENTS - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode*. - -- **Agentic mode**: You run autonomously without human supervision - work with your team to find solutions. The [FeatureId] may be provided as argument, or you ask the user which feature to implement. -- **Standalone mode**: The user guides you interactively. Ask questions and collaborate with the user throughout the feature implementation. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.agent/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Select feature to implement**: - - **If [FeatureId] provided as argument:** Use the provided [FeatureId]. 
- - **If NO [FeatureId] provided:** - - **CRITICAL: DO NOT guess or automatically lookup features. ONLY ask the user.** - - - Ask user: "Which feature would you like to implement? (Provide feature ID, or I can list available features if you'd like)" - - Wait for user response - - **ONLY if user explicitly requests a list**, query [PRODUCT_MANAGEMENT_TOOL] for: - - Recently created features (last 48 hours) - - All features in [Planned] status - - Show: Feature ID, name, description (first line), created date - - User provides feature ID (e.g., "proj_abc123" or "PP-100") - - Validate feature exists in [PRODUCT_MANAGEMENT_TOOL] - - If not found, ask user again or offer to list features - -3. **Load [feature] and [task] data** from `[PRODUCT_MANAGEMENT_TOOL]` using the selected/provided [FeatureId]. - -4. **Automatically determine if parallel execution is appropriate**: - - Read the PRD and look for indicators that [tasks] are designed for parallel work: - - PRD mentions "parallel" or "simultaneously" in Tasks section - - [Task] descriptions mention "can work in parallel with" or "independent" - - [Task] descriptions mention "mocked dependencies" or "mocks" - - [Tasks] are explicitly structured to suggest parallel execution - - **Decision:** - - **If parallel indicators found**: Use Parallel Mode (inform user: "Detected parallel-optimized [tasks]") - - **Otherwise**: Use Sequential Mode (default, safer—inform user: "Using sequential execution") - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Load all [tasks] from the [feature]", "status": "pending", "activeForm": "Loading tasks"}, - {"content": "Update [feature] status to [Active]", "status": "pending", "activeForm": "Updating feature status"}, - {"content": "Delegate [tasks] to engineers and track completion", "status": "pending", "activeForm": "Delegating tasks"}, - {"content": "Update [feature] status to [Resolved]", "status": "pending", "activeForm": "Updating feature status to Resolved"} - ] -} -``` - -**Note**: After creating this base todo, you'll replace "Delegate [tasks] to engineers" with actual [task] items from the [feature] (see Step 2 below). - ---- - -## Your Role: Task-Level Coordination - -**You delegate tasks to engineers** - -Your job as Coordinator: -- Load ALL [tasks] from the [feature] -- Create todo list with ALL [tasks] -- Delegate [tasks] to engineer proxy agents -- Engineer proxy agents are pure passthroughs—they just forward your request to workers -- Track progress and mark [tasks] complete -- Don't change code, commit, or use MCP tools yourself - -## Execution Modes - -### Sequential Mode (Default) - -Delegate one [task] completely before starting the next: - -1. Delegate [task] 1 from [feature] → Wait for completion -2. Delegate [task] 2 from [feature] → Wait for completion -3. Continue until all [tasks] in [feature] complete - -### Parallel Mode - -[tasks] must be implemented in the order they appear in [PRODUCT_MANAGEMENT_TOOL]. Don't skip [tasks]. Within that constraint, you can run independent [tasks] in parallel. - -**Example**: Backend [task] + Frontend [task] simultaneously (if independent) - -**BEFORE delegating in parallel, evaluate dependencies**: - -1. 
**Check engineer type conflicts**: Can't run two tasks with same engineer type (same worker) in parallel - - ❌ WRONG: Two backend tasks simultaneously - - ✅ CORRECT: Backend task + Frontend task simultaneously - -2. **Check functional dependencies**: Can't run dependent work in parallel - - ❌ WRONG: Frontend task that requires backend API being built in that same parallel round - - ❌ WRONG: E2E tests for features being implemented in that same parallel round - - ✅ CORRECT: Independent backend and frontend tasks - - ✅ CORRECT: Backend APIs + E2E tests for existing features - -**If dependencies exist OR same engineer type needed**: Use Sequential mode instead - -**If tasks are independent AND use different engineer types**: Delegate in parallel - -**Example** (parallel independent tasks): -``` -In a SINGLE message, delegate multiple tasks: -1. backend-engineer: Feature: {featureId}, Task: {task1Id} - "Backend for user CRUD operations" -2. frontend-engineer: Feature: {featureId}, Task: {task2Id} - "Frontend UI skeleton for user management" - -Wait for both to complete, then delegate next round (sequential): -3. frontend-engineer: Feature: {featureId}, Task: {task3Id} - "Connect frontend to backend" - -Then continue with next parallel round if more independent tasks exist. -``` - -If you're unsure about dependencies, use Sequential mode (safer default) - -## Mandatory Workflow - -**Note:** If you receive MCP errors about agents not running, inform the user to start the required agents (backend-engineer, frontend-engineer, qa-engineer) in separate terminals - -### Step 1: Load Tasks - -Load all [tasks] from the [feature] loaded in Mandatory Preparation - -Refer to `/.agent/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` for tool-specific instructions on how to: -- Query for [tasks] within the [feature] -- Extract [task] titles and IDs -- Determine [task] ordering - -### Step 2: Create Todo List - -Use TodoWrite to create todo list with ALL [tasks]: - -``` -1. 
Backend for user CRUD operations [pending] -2. Frontend UI skeleton for user management [pending] -3. Connect frontend to backend [pending] -4. End-to-end tests for user management [pending] -``` - -Ensure you have confirmed [taskId] values for all [tasks] before proceeding - -### Step 3: Delegate Tasks - -**Sequential Mode (default)**: - -**0. Update [feature] status to [Active]** in [PRODUCT_MANAGEMENT_TOOL] (once at start) - -FOR EACH [task]: - **1. Mark [task] [in_progress]** in todo - - **2. Determine resetMemory value**: - - First delegation of a [task]: `resetMemory=true` (start fresh) - - Re-delegation for follow-up/fix: `resetMemory=false` (maintain context) - - **3. Delegate to engineer proxy agent**: - - Use Task tool with appropriate engineer subagent: - - Backend [task] → `backend-engineer` subagent - - Frontend [task] → `frontend-engineer` subagent - - E2E test [task] → `qa-engineer` subagent - - **Delegation format** (include all parameters in the prompt): - ``` - Feature: {featureId} ({featureTitle}) - Task: {taskId} ({taskTitle}) - Branch: {currentBranch} - Reset memory: true - - Please implement this [task]. - ``` - - The proxy agent will parse this and call the MCP start_worker_agent tool with these parameters - - **4. Wait for engineer proxy to complete**: - - Engineer proxy passes your exact request to worker - - Worker implements, gets reviewed, commits - - Engineer proxy returns response - - **5. Verify [task] completion**: - - Check if response contains "✅ Task {taskId} completed successfully!" 
- - **If SUCCESS marker found**: - - Verify code was committed by checking recent commits - - Verify [task] marked [Completed] in [PRODUCT_MANAGEMENT_TOOL] - - **If backend [task]**: Restart Aspire AppHost using the run MCP tool to apply database migrations and backend changes - - **If anything unexpected (multiple [tasks] done, uncommitted code, failing tests, etc.)**: - - Zero tolerance - system started clean, any warnings or errors means we broke it and must be fixed before continuing (follow the Boy Scout rule) - - Stop immediately, diagnose the problem, and make a plan to get back on track - - Delegate fixes to engineers - don't fix anything yourself - - **If you need to re-delegate to the same engineer for follow-up**: Use resetMemory=false to maintain context - - In edge cases, revert commits and reset [PRODUCT_MANAGEMENT_TOOL] state to start over - - Mark [task] [completed] in todo - - Move to next [task] - - **If NO success marker found ([task] FAILED)**: - - Change [task] status to [Planned] in [PRODUCT_MANAGEMENT_TOOL] - - Check git status for uncommitted changes - - If uncommitted code exists: Stash with descriptive name (e.g., "{taskId}-failed-{sanitized-task-title}-{timestamp}") - - Attempt to find alternative solution if possible - - If [task] is blocking: Ask user for guidance - - If [task] is non-blocking: Continue with other [tasks] - - **6. Move to next [task]** - -**Parallel Mode** (only if user explicitly requests): - -Work on multiple [tasks] in parallel (each [task] uses a different engineer type). In each round, delegate independent [tasks] simultaneously, wait for all to return, then move to the next round. - -**Delegation format for parallel mode** (include all parameters in the prompt): -``` -Feature: {featureId} ({featureTitle}) -Task: {taskId} ({taskTitle}) -Branch: {currentBranch} -Reset memory: true - -⚠️ Parallel Work: You are working in parallel with {other-engineer} on {other-task-title}. You may see their git commits. 
If you encounter errors that seem related to their changes, sleep 5-10 minutes and re-test. - -Please implement this [task]. -``` - -The proxy agent will parse this and call the MCP start_worker_agent tool with these parameters - -FOR EACH round of parallel delegation: - In a SINGLE message, delegate multiple [tasks] using Task tool multiple times - - Wait for ALL Task tool calls to return - - Verify each [task]: - - Check if response contains "✅ Task {taskId} completed successfully!" - - If success marker found: - - Verify code was committed by checking recent commits - - Verify [task] marked [Completed] in [PRODUCT_MANAGEMENT_TOOL] - - **If backend [task]**: Restart Aspire AppHost using the run MCP tool to apply database migrations and backend changes - - **If anything unexpected (multiple [tasks] done, uncommitted code, failing tests, etc.)**: - - Zero tolerance - system started clean, any warnings or errors means we broke it and must be fixed before continuing (follow the Boy Scout rule) - - Stop immediately, diagnose the problem, and make a plan to get back on track - - Delegate fixes to engineers - don't fix anything yourself - - **If you need to re-delegate to the same engineer for follow-up**: Use resetMemory=false to maintain context - - In edge cases, revert commits and reset [PRODUCT_MANAGEMENT_TOOL] state to start over - - Mark [task] [completed] in todo - - If no success marker found: - - Change [task] status to [Planned] in [PRODUCT_MANAGEMENT_TOOL] - - Check git status for uncommitted changes - - If uncommitted code exists: Stash with descriptive name (e.g., "{taskId}-failed-{sanitized-task-title}-{timestamp}") - - Attempt alternative solution if possible - - If [task] is blocking: Ask user for guidance - - If [task] is non-blocking: Continue with other [tasks] - - Continue with next round of parallel [tasks] - -### Step 4: Update Feature Status - -After ALL [tasks] are completed: - -1. 
**Verify all [tasks] genuinely [completed]**: - - Check that ALL [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are marked [completed] - - **If any [task] is NOT [completed]**: - - Evaluate if there are alternative approaches to complete the [tasks] - - If no alternatives exist: Inform user about incomplete [tasks] and ask for guidance - - DO NOT proceed with [feature] status update - -2. **If all [tasks] are [completed], update [feature] status to [Resolved]** in [PRODUCT_MANAGEMENT_TOOL]: - - All [tasks] are [completed] - - [Feature] implementation is complete - - Status signals completion of implementation phase (not deployed yet) - -### Step 5: Finish When Complete - -Stop ONLY when: -- ALL [tasks] are [completed] in todo -- ALL [tasks] have been delegated and [completed] -- [Feature] status is [Resolved] - -## Rules - -**Don't**: -- Stop before completion—continue until everything is done -- Change code or commit yourself -- Use `developer_cli` MCP tool directly -- Decide on parallel mode yourself—only use if user explicitly requests -- Delegate multiple [tasks] to same engineer type in parallel - -**Do**: -- Use Task tool with subagent_type to delegate [tasks] -- Load all [tasks] from [feature] -- Create simple todo list with [tasks] -- Use Sequential mode by default -- In parallel mode, ensure each [task] in a round uses different engineer type -- Use resetMemory=true for first delegation, resetMemory=false for follow-ups on same task - -## Engineer Proxy Agent Responsibilities - -Engineer proxy agents (backend-engineer, frontend-engineer, qa-engineer) are PURE PASSTHROUGHS: -- They receive your delegation message -- They pass it VERBATIM to the worker via MCP -- They wait for worker to complete (implement + review + commit) -- They return worker's response to you - -**Engineer proxies do NOT**: -- Load data -- Make decisions -- Coordinate anything - -**You handle ALL coordination**—loading data, tracking [tasks], managing todo - -## Examples - -**Sequential 
Mode**: -``` -1. Load [feature] and all 3 [tasks] -2. Create todo with 3 [tasks] -3. Update [Feature] status to [Active] in [PRODUCT_MANAGEMENT_TOOL] -4. Delegate using Task tool (backend-engineer) with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-001 (Backend for user CRUD operations) - Branch: feature/user-management - Reset memory: true - - Please implement this [task]." -5. Wait (proxy forwards to worker, worker implements+reviews+commits, proxy returns) -6. Verify response has "✅ Task completed successfully!" → Mark [task] [completed] -7. Delegate using Task tool (frontend-engineer) with similar prompt format -8. Wait, verify, and mark complete -9. Delegate using Task tool (qa-engineer) with similar prompt format -10. Wait, verify, and mark complete -11. Verify all [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are [completed] -12. Update [Feature] status to [Resolved] in [PRODUCT_MANAGEMENT_TOOL] -13. Done! -``` - -**Parallel Mode**: -``` -1. Load [feature] and all 4 [tasks] -2. Create todo with 4 [tasks] -3. Update [Feature] status to [Active] in [PRODUCT_MANAGEMENT_TOOL] -4. Identify [tasks] that can run in parallel: - - Round 1: Frontend UI skeleton (frontend) + Backend CRUD (backend) - parallel - - Round 2: Connect frontend to backend (frontend) - sequential after round 1 - - Round 3: E2E tests (qa) - sequential after round 2 -5. In SINGLE message, delegate both [tasks] in Round 1 using Task tool: - - Task tool → frontend-engineer with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-002 (Frontend UI skeleton for user management) - Branch: feature/user-management - Reset memory: true - - ⚠️ Parallel Work: You are working in parallel with backend-engineer on Backend CRUD. You may see their commits. - - Please implement this [task]." 
- - Task tool → backend-engineer with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-001 (Backend for user CRUD operations) - Branch: feature/user-management - Reset memory: true - - ⚠️ Parallel Work: You are working in parallel with frontend-engineer on Frontend UI skeleton. You may see their commits. - - Please implement this [task]." - -6. Wait for BOTH to complete -7. Verify each response has "✅ Task completed successfully!" → Mark both [tasks] [completed] -8. Delegate Task tool (frontend-engineer) with prompt including Feature/Task/Title/Branch -9. Wait, verify, mark complete -10. Delegate Task tool (qa-engineer) with prompt including Feature/Task/Title/Branch -11. Wait, verify, mark complete -12. Verify all [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are [completed] -13. Update [Feature] status to [Resolved] in [PRODUCT_MANAGEMENT_TOOL] -14. Done! -``` - -## Remember - -- You delegate entire [tasks] (large scope—complete vertical slices) -- Engineer proxies are passthroughs, not coordinators -- You manage the todo list, not the proxies -- Your job: Load [tasks] from [feature], create todo, delegate [tasks], track completion -- Sequential is default—parallel only when user explicitly requests -- Use resetMemory=true for first delegation of each [task], resetMemory=false for re-delegations diff --git a/.agent/workflows/process/implement-task.md b/.agent/workflows/process/implement-task.md deleted file mode 100644 index 2d1b31fdb7..0000000000 --- a/.agent/workflows/process/implement-task.md +++ /dev/null @@ -1,373 +0,0 @@ ---- -description: Implement a specific [task] from a [feature] following the systematic workflow ---- -# Implement Task Workflow - -You are implementing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". 
- -- **Agentic mode**: The [taskId] comes from `current-task.json`, not from command arguments. The CLI passes only the [taskTitle] as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Task details are passed as command arguments `{{{title}}}`. If a [taskId] is provided, read [feature] and [task] from `[PRODUCT_MANAGEMENT_TOOL]`. If no [taskId] provided, ask user to describe the task. There is no `current-task.json`. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.agent/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task you're implementing, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - - **If current-task.json does NOT exist:** - - This means there is no active task assignment. Call CompleteWork immediately to terminate your session: - - ``` - Call CompleteWork with: - - mode: "task" - - agentType: your agent type - - taskSummary: "No active task assignment found" - - responseContent: "Session invoked without active task. Current-task.json does not exist. Terminating session." - - feedback: "[system] Session was invoked with /process:implement-task but no current-task.json exists - possible double invocation after completion" - ``` - - DO NOT proceed with any other work. DO NOT just say "nothing to do". Call CompleteWork immediately to terminate the session. - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Verify Previous Work Committed**: - - Before proceeding, verify your previous task was committed: - 1. 
Run `git log --oneline -5` to check recent commits. - 2. Look for commits containing your agent type (e.g., "backend-engineer", "frontend-engineer"). - 3. If your previous task is uncommitted: **REFUSE to start** and respond with error explaining uncommitted work exists. - 4. Note: Changes from other engineers (parallel work) are expected and fine - only verify YOUR previous work is committed. - -5. **Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active]", "status": "pending", "activeForm": "Reading task and updating status to Active"}, - {"content": "Understand the full feature context", "status": "pending", "activeForm": "Understanding feature context"}, - {"content": "Research existing patterns for this [task] type", "status": "pending", "activeForm": "Researching existing patterns"}, - {"content": "Implement each subtask", "status": "pending", "activeForm": "Implementing subtasks"}, - {"content": "Build and verify translations (frontend-engineer only)", "status": "pending", "activeForm": "Building and verifying translations"}, - {"content": "Run validation tools and fix all failures/warnings", "status": "pending", "activeForm": "Running validation tools"}, - {"content": "Test in browser with zero tolerance (frontend-engineer only)", "status": "pending", "activeForm": "Testing in browser"}, - {"content": "Fix any bugs discovered during validation/testing", "status": "pending", "activeForm": "Fixing bugs discovered"}, - {"content": "Update [task] status to [Review] and delegate to reviewer subagent (skip in standalone mode)", "status": "pending", "activeForm": "Updating status and calling reviewer"}, - {"content": "Check feature progress (skip in standalone mode/optional in agentic mode)", "status": "pending", "activeForm": "Checking feature progress"}, - {"content": "MANDATORY: Call CompleteWork after reviewer approval 
(skip in standalone mode)", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - -**After creating this template**: Remove todo items marked for a different engineer role. For example, if you're a backend-engineer, remove items containing "(frontend-engineer only)". - ---- - -## Workflow Steps - -**STEP 1**: Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -1. Read [feature] from `featureId` in [PRODUCT_MANAGEMENT_TOOL] to understand the full PRD context -2. Read [task] from `taskId` in [PRODUCT_MANAGEMENT_TOOL] to get task details and subtask bullets -3. **Update [task] status to [Active]** in `[PRODUCT_MANAGEMENT_TOOL]` -4. **If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and call CompleteWork explaining the task could not be found. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] operations -- Still follow full engineer → reviewer → commit cycle - -**After reading [task], unfold subtasks in todo:** - -1. Extract the subtask bullets from [task] description. -2. Replace the "Implement each subtask" todo item with: - - The task name as a parent item. - - Each subtask as an indented child item (using ├─ and └─ formatting). 
- -**Example:** -If task with title "Backend for user CRUD operations" has subtasks: -``` -- Create UserId strongly typed ID -- Create User aggregate -- Create IUserRepository interface and implementation -- Create API endpoint for create user -``` - -Replace the single "Implement each subtask" item with: -``` -Backend for user CRUD operations -├─ Create UserId strongly typed ID [pending] -├─ Create User aggregate [pending] -├─ Create IUserRepository interface and implementation [pending] -└─ Create API endpoint for create user [pending] -``` - -**STEP 2**: Understand the full feature context - -Before implementing, understand the big picture: - -1. **Read the [feature] from `featureId`** in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem being solved and how the proposed solution will solve it. - - Read the full PRD to understand business context. - -2. **Read ALL [task] titles** (not full descriptions) in the [feature] (if not ad-hoc): - - See the planned approach and implementation sequence. - - Understand what you're building in context of the [feature]. - -3. **Read YOUR [task] description carefully**: - - Already read in STEP 1, but review the subtask bullets. - - Tasks are complete vertical slices. - - Subtasks are already unfolded in your todo list (see STEP 1 above). - -The [feature] plan was AI-generated by tech-lead in a few minutes after interviewing the user. You have implementation time to consider the code carefully. You are the expert closest to the code. If something doesn't align with: -- Feature intent. -- Rules in the project. -- Patterns used in the solution. -- Architectural patterns. -- Best practices. -- Simpler approaches. - -**Question it.** Use report_problem or comment on the [task]. Better ideas from implementation phase should surface. 
- -**Collaborate with your team**: For complex problems or architectural decisions, engage in conversation with team members (use ad-hoc delegation to discuss with other engineers). Better solutions often emerge from team collaboration. - -**Note**: All architectural rules for your role are embedded in your system prompt and available for reference at all times. - -**STEP 3**: Research existing patterns for this [task] type - -Research the codebase to find similar implementations. Look for existing code that handles similar features, patterns, or business logic that can guide your implementation. - -**STEP 4**: Implement each subtask - -**Incremental development approach:** - -Since [tasks] are complete vertical slices, build and test incrementally as you work through each subtask. This prevents accumulating errors and makes debugging easier. - -**For EACH subtask in your todo:** - -1. **Mark subtask [in_progress]** in todo. -2. **Implement the subtask**. -3. **Build immediately**: - - Backend: `execute_command(command: "build", backend: true, selfContainedSystem: "{self-contained-system}")`. - - Frontend: `execute_command(command: "build", frontend: true, selfContainedSystem: "{self-contained-system}")`. - - Fix any build errors before proceeding. -4. **Test immediately** (backend only): - - `execute_command(command: "test", backend: true, selfContainedSystem: "{self-contained-system}")`. - - Fix any test failures before proceeding. -5. **Mark subtask [completed]** in todo. -6. **Move to next subtask**. - -**Why build/test after each subtask:** -- Catches errors early when context is fresh. -- Prevents error accumulation. -- Makes debugging faster. -- Ensures each piece works before moving on. -- Critical for larger tasks. - -**Do NOT run format/inspect after each subtask** - these are slow and run once at the end in STEP 6. - -**STEP 5**: Build and verify translations (frontend-engineer only) - -1. Run build to extract new translation strings to `*.po` files. -2. 
Find ALL empty translations: `grep -r 'msgstr ""' */WebApp/shared/translations/locale/*.po`. -3. Translate EVERY empty msgstr found (all languages: da-DK, nl-NL, etc.). -4. Use consistent domain terminology (check existing translations for guidance). - -**STEP 6**: Run validation tools and fix all failures/warnings - -**Zero tolerance for issues**: -- We deploy to production after review - quality is non-negotiable. -- **Boy Scout Rule**: Leave the codebase cleaner than you found it. -- Fix all failures, warnings, or problems anywhere in the system. -- This includes pre-existing issues unrelated to your changes. -- Don't request review with outstanding issues. - -**Inspect findings block merging**: If inspect returns "Issues found", the CI pipeline will fail and the code cannot be merged. The severity level (note/warning/error) is irrelevant - all findings must be fixed before requesting review. - -For **backend [tasks]**: -1. Run **inspect** for your self-contained system: `execute_command(command: "inspect", backend: true, selfContainedSystem: "{self-contained-system}")`. -2. Fix ALL failures found (zero tolerance). - -**Note**: Build and test were already run after each subtask in STEP 4. Backend-engineer does NOT run format - the reviewer will handle formatting before commit. - -For **frontend [tasks]**: -1. Run **build** for your self-contained system: `execute_command(command: "build", frontend: true, selfContainedSystem: "{self-contained-system}")`. -2. Run **format** for all self-contained systems: `execute_command(command: "format", frontend: true)`. -3. Run **inspect** for all self-contained systems: `execute_command(command: "inspect", frontend: true)`. -4. Fix ALL failures found (zero tolerance). - -**STEP 7**: Test in browser with zero tolerance (frontend-engineer only) - -**Required for frontend engineers** - -1. 
**Navigate to https://localhost:9000** and test ALL functionality: - - **Test the COMPLETE happy path** of the new feature from start to finish. - - **Test ALL edge cases**: validation errors, empty states, maximum values, special characters. - - **Test user scenarios**: What would a user actually do with this feature? - - **Take screenshots** and critically examine if everything renders with expected layout and styling. - - Test in **dark mode** and **light mode** (switch theme and verify UI renders correctly). - - Test **localization** (switch language if feature has translatable strings). - - Test **responsive behavior**: mobile size, small browser, large browser (resize and verify layout adapts). - - Verify UI components render correctly (spacing, alignment, colors, borders, fonts). - - Test all user interactions (clicks, forms, dialogs, navigation, keyboard navigation). - - **Document what you tested** in your response (which scenarios, which user flows, which modes tested). - - If website not responding, use **run** MCP tool to restart server. - -2. **Test with different user roles** (if applicable): - - Test as admin user: `admin@platformplatform.local` / `UNLOCK`. - - Test as non-admin user if feature has role-based access. - - Verify permissions and access controls work correctly. - -3. **Monitor Network tab** - Fix ALL issues: - - **Zero tolerance**: No failed requests, no 4xx/5xx errors. - - Check ALL API calls for the new feature execute successfully. - - No slow requests without explanation. - - Fix ANY network warnings or errors (even if pre-existing per Boy Scout rule). - -4. **Monitor Console tab** - Fix ALL issues: - - **Zero tolerance**: No console errors, no warnings. - - Fix ANY console errors or warnings (even if pre-existing per Boy Scout rule). - - Clear console and verify it stays clean during all interactions. - -5. **Login instructions**: - - Username: `admin@platformplatform.local`. 
- - Use `UNLOCK` for verification code (works on localhost only). - - If user doesn't exist: Sign up for a new tenant, use `UNLOCK` for verification code. - -**Boy Scout Rule**: Leave the codebase cleaner than you found it. If you see pre-existing console errors or network warnings unrelated to your changes, FIX THEM. Zero tolerance means ZERO - not "only for my changes". - -**STEP 8**: Fix any bugs discovered during validation/testing - -If you discover bugs during testing or validation (API errors, broken functionality, console errors, broken UI, test failures), fix them before requesting review. Don't request review with known bugs. - -**If bug is in existing code (not your changes)**: -1. Stash only your changes: `git stash push -- <your-changed-files>` (don't include changes from other engineers working in parallel). -2. Verify the bug exists on clean code. -3. **Agentic mode**: Fix yourself if within your specialty OR delegate to engineer subagent if outside your specialty (use "ad-hoc" taskId). - **Standalone mode**: Fix it yourself or inform user that the bug requires different expertise. -4. Follow STEP 9 to delegate to reviewer and get the fix committed. -5. `git stash pop` to restore your changes and continue. - -**If you see errors that might be from parallel engineer's changes**: -- Check `git log --oneline` to see recent commits and understand what parallel engineer is working on. -- If recent commits exist: Sleep 5 minutes, then re-test (parallel engineer may be fixing it). -- If issue persists after 10-15 minutes: Delegate to that engineer or fix yourself if within specialty. - -**Valid Solutions When Stuck**: -- Fix the bug yourself if it's within your specialty (your role boundaries). -- Delegate to appropriate engineer if bug is outside your specialty (use start_worker_agent with ad-hoc taskId). -- **Revert your changes** if solution is too complex - revert all git changes, fix pre-existing problems first, then re-implement cleanly. 
- -**STEP 9**: Update [task] status to [Review] and delegate to reviewer subagent (skip in standalone mode) - -**Before calling reviewer (every time, including re-reviews)**: - -**1. Update [task] status to [Review]** in [PRODUCT_MANAGEMENT_TOOL] (if featureId is NOT "ad-hoc"): - - This applies to EVERY review request, not just the first one. - - When reviewer rejects and moves status to [Active], you MUST move it back to [Review] when requesting re-review. - - Skip this only for ad-hoc work (featureId is "ad-hoc"). - -**2. Zero tolerance verification**: Confirm ALL validation tools pass with ZERO failures/warnings. NEVER request review with ANY outstanding issues - we deploy to production after review. - -**3. Identify your changed files**: -- Run `git status --porcelain` to see ALL changed files. -- Identify YOUR files (files you created/modified for THIS task): - - **Backend engineers**: MUST include `*.Api.json` files. These are auto-generated TypeScript types from your C# API endpoints, placed in WebApp/shared/lib/api/ for frontend consumption, but owned by backend. - - **Frontend engineers**: MUST exclude `*.Api.json` files (these belong to backend, not you). - - Don't forget `.po` translation files. - - Exclude files from parallel engineers (different agent types). - - If you changed files outside your scope: `git restore <file>` to revert. -- **CRITICAL for backend engineers**: Check `git status` for any `*.Api.json` files and include them in your file list. -- List YOUR files in "Files Changed" section (one per line with status). 
- -Delegate to reviewer subagent: - -**Delegation format**: -``` -[One short sentence: what you implemented or fixed] - -## Files Changed -- path/to/file1.tsx -- path/to/file2.cs -- path/to/translations.po - -Request: {requestFilePath} -Response: {responseFilePath} - -[If working in parallel: Include parallel work notification from coordinator, e.g., "⚠️ Parallel Work: Frontend-engineer is working in parallel on {task-title}"] -``` - -**MCP call parameters**: -- `agentType`: backend-reviewer, frontend-reviewer, or qa-reviewer -- `taskTitle`: From current-task.json -- `markdownContent`: Your delegation message above -- `branch`: From current-task.json -- `featureId`: From current-task.json -- `taskId`: From current-task.json -- `requestFilePath`: From current-task.json -- `responseFilePath`: From current-task.json - -**Review loop**: -- If reviewer returns NOT APPROVED → Fix issues → Update [task] status to [Review] → Call reviewer subagent again. -- If reviewer returns APPROVED → Check YOUR files (not parallel engineers' files) are committed → Proceed to completion. -- Don't call CompleteWork unless reviewer approved and committed your code. -- Don't commit code yourself - only the reviewer commits. -- If rejected 3+ times with same feedback despite validation tools passing: Report problem with severity: error, then stop. Don't call CompleteWork, don't proceed with work - the user will take over manually. - -**STEP 10**: Check feature progress (skip in standalone mode/optional in agentic mode) - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- Optionally check if there are more [tasks] remaining in the [feature]. -- This helps provide context in your completion message. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip (no [feature] to check). 
- -**STEP 11**: Call CompleteWork after reviewer approval (skip in standalone mode) - -After completing all work and receiving reviewer approval, call the MCP **CompleteWork** tool with `mode: "task"` to signal completion. This tool call will terminate your session. - -CompleteWork requires reviewer approval and committed code. - -Call CompleteWork after reviewer approval, even if this is the last [task] in a [feature]. - -**Before calling CompleteWork**: -1. Ensure all work is complete and all todos are marked as completed. -2. Write a comprehensive response (what you accomplished, notes for Coordinator). -3. Create an objective technical summary in sentence case (like a commit message). -4. Reflect on your experience and write categorized feedback using prefixes: - - `[system]` - Workflow, MCP tools, agent coordination, message handling. - - `[requirements]` - Requirements clarity, acceptance criteria, task description. - - `[code]` - Code patterns, rules, architecture guidance. - - Examples: - - `[system] CompleteWork returned errors until title was less than 100 characters - consider adding format description`. - - `[requirements] Task mentioned Admin but unclear if TenantAdmin or WorkspaceAdmin`. - - `[code] No existing examples found for implementing audit logging in this context`. - - You can provide multiple categorized items. Use report_problem for urgent system bugs during work. - -**Call MCP CompleteWork tool**: -- `mode`: "task" -- `agentType`: Your agent type (backend-engineer, frontend-engineer, or qa-engineer) -- `taskSummary`: Objective technical description of what was implemented (imperative mood, sentence case). Examples: "Add user role endpoints with authorization", "Implement user avatar upload", "Fix null reference in payment processor". NEVER use subjective evaluations like "Excellent implementation" or "Clean code". 
-- `responseContent`: Your full response in markdown -- `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes as described above - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork - ---- - -## REMINDER: Use Exact TodoWrite JSON - -**✅ DO: Copy the JSON from STEP 2**. - -**❌ DON'T: Create custom todo format**. diff --git a/.agent/workflows/process/review-end-to-end-tests.md b/.agent/workflows/process/review-end-to-end-tests.md deleted file mode 100644 index d247222bdb..0000000000 --- a/.agent/workflows/process/review-end-to-end-tests.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -description: Review end-to-end test implementation for a [task] ---- -# Review E2E Tests Workflow - -You are reviewing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The review request comes from `current-task.json`. The CLI passes only the task title as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Test files are passed as command arguments `{{{title}}}`. Read test files from user-provided paths or from `git status`. - -## Review Principles - -**Zero Tolerance for Test Quality**: E2E tests must be perfect. ALL tests must pass, ZERO console errors, ZERO network errors, NO sleep statements. There are no exceptions. - -**Evidence-Based Reviews**: Every finding must be backed by rules in `/.agent/rules/end-to-end-tests/end-to-end-tests.md` or established patterns in the codebase. - -**Speed is Critical**: Tests must run fast. Reject tests that are unnecessarily slow or create too many small test files. - ---- - -## STEP 0: Mandatory Preparation - -1. 
**Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.agent/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path (contains engineer's request message) - - `responseFilePath`: Response file path (where you'll write your review outcome) - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task being reviewed, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read all files referenced in the engineer's request** (test files, implementation details, etc.). - -5. **Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [feature] and [task] to understand requirements", "status": "pending", "activeForm": "Reading feature and task"}, - {"content": "Run feature-specific e2e tests", "status": "pending", "activeForm": "Running feature E2E tests"}, - {"content": "Review test file structure and organization", "status": "pending", "activeForm": "Reviewing test structure"}, - {"content": "Review each test step for correct patterns", "status": "pending", "activeForm": "Reviewing test steps"}, - {"content": "Review test efficiency and speed", "status": "pending", "activeForm": "Reviewing test efficiency"}, - {"content": "Make binary decision (approve or reject)", "status": "pending", "activeForm": "Making decision"}, - {"content": "If approved, run full regression test suite", "status": "pending", "activeForm": "Running full regression tests"}, - {"content": "If approved, commit changes", "status": "pending", "activeForm": "Committing if approved"}, - {"content": "Update [task] status to [Completed] 
or [Active]", "status": "pending", "activeForm": "Updating task status"}, - {"content": "MANDATORY: Call CompleteWork", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [feature] and [task] to understand requirements - -1. **Read the [feature]** from `featureId` in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem and solution approach. - -2. **Read the [task]** from `taskId` in [PRODUCT_MANAGEMENT_TOOL]: - - Read the task description carefully. - - Understand what tests should cover. - -3. **Read engineer's request** to understand what tests were created. - -**If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and reject the review explaining the task could not be found. - -4. **Study E2E rules**: - - Read [End-to-End Tests](/.agent/rules/end-to-end-tests/end-to-end-tests.md) - - Ensure engineer followed all patterns - -**STEP 2**: Run feature-specific e2e tests first - -**If tests require backend changes, run the run tool first**: -- Use **run MCP tool** to restart server and run migrations -- The tool starts .NET Aspire at https://localhost:9000 - -**Run feature-specific E2E tests**: -- Use **end-to-end MCP tool** to run tests: `end-to-end(searchTerms=["feature-name"])` -- **ALL tests MUST pass with ZERO failures to approve** -- **Verify ZERO console errors** during test execution -- **Verify ZERO network errors** (no unexpected 4xx/5xx responses) -- If ANY test fails: REJECT -- If ANY console errors: REJECT -- If ANY network errors: REJECT - -**STEP 3**: Review test file structure and organization - -**Critical Check 1 - Test Count:** -- Normally ONE new `@comprehensive` test per feature -- Existing `@smoke` tests should be updated, not duplicated -- For BIG features: Allow both new `@smoke` and new `@comprehensive` -- **Reject if too many small test files created** - -**STEP 4**: Review each test step 
for correct patterns - -**Critical Check 1 - Step Naming Pattern:** -- **EVERY step MUST follow**: "Do something & verify result" -- ✅ Good: `"Submit login form & verify authentication"` -- ❌ Bad: `"Verify button is visible"` (no action) -- ❌ Bad: `"Test login"` (uses "test" prefix) -- **Reject if steps don't follow pattern** - -**Critical Check 2 - No Sleep Statements:** -- Search for: `waitForTimeout`, `sleep`, `delay`, `setTimeout` -- **Reject if found—no exceptions** -- Playwright auto-waits—sleep is NEVER needed in any scenario -- Demand Playwright await assertions instead: - - Use `toBeVisible()`, `toHaveURL()`, `toContainText()`, etc. - - These built-in auto-wait mechanisms handle all timing scenarios - -**STEP 5**: Review test efficiency and speed - -**Critical Check 1 - Leverage Existing Logic:** -- Verify tests use fixtures: `{ page }`, `{ ownerPage }`, `{ adminPage }`, `{ memberPage }` -- Verify tests use helpers: `expectToastMessage`, `expectValidationError`, etc. -- **Reject if tests duplicate existing logic** - -**Critical Check 2 - Speed Optimization:** -- Tests should test MANY things in FEW steps -- Avoid excessive navigation or setup -- Group related scenarios together -- **Reject if tests are unnecessarily slow** - -**STEP 6**: Make binary decision (approve or reject) - -**Aim for perfection, not "good enough".** - -**APPROVED only if ALL criteria met:** -- ✓ All E2E tests passed with zero failures -- ✓ Zero console errors during test execution -- ✓ Zero network errors during test execution -- ✓ No sleep statements found -- ✓ All steps follow "Do something & verify result" pattern -- ✓ Tests use existing fixtures and helpers -- ✓ Tests are efficient and fast - -**Reject if any issue exists—no exceptions. 
Common rationalizations to avoid:** -- ✗ "Test failed but feature works manually" → Reject, fix test -- ✗ "Console error unrelated to E2E code" → Reject anyway -- ✗ "It's just a warning" → Reject, zero means zero -- ✗ "Previous test run passed" → Reject anyway if current run has issues - -**When rejecting:** Do full review first, then reject with ALL issues listed (avoid multiple rounds). - -**STEP 7**: If approved, run full regression test suite - -**Before committing, run all e2e tests to ensure no regressions:** -- Use **end-to-end MCP tool** WITHOUT searchTerms: `end-to-end()` -- This runs the complete test suite across all browsers -- **ALL tests MUST pass with ZERO failures** -- If ANY test fails: REJECT (do not commit) - -**STEP 8**: Commit changes - -1. Stage test files: `git add <file>` for each test file -2. Commit: One line, imperative form, no description, no co-author -3. Get hash: `git rev-parse HEAD` - -Don't use `git add -A` or `git add .` - -**STEP 9**: Update [task] status to [Completed] or [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- If APPROVED: Update [task] status to [Completed]. -- If REJECTED: Update [task] status back to [Active]. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] status updates. 
- -**STEP 10**: Call CompleteWork - -**Call MCP CompleteWork tool**: -- `mode`: "review" -- `agentType`: qa-reviewer -- `commitHash`: Commit hash if approved, null/empty if rejected -- `rejectReason`: Rejection reason if rejected, null/empty if approved -- `responseContent`: Your full review feedback -- `feedback`: Mandatory categorized feedback using prefixes: - - `[system]` — Workflow, MCP tools, agent coordination, message handling - - `[requirements]` — Requirements clarity, acceptance criteria, task description - - `[code]` — Code patterns, rules, architecture guidance - - Examples: `[system] end-to-end MCP tool reported test passed but it actually failed` or `[requirements] Feature requirements didn't specify mobile viewport testing` - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork. - ---- - -## Rules - -1. **Tests must pass** — Don't approve failing tests -2. **No sleep statements** — Non-negotiable -3. **Follow step pattern** — Every step needs action + verification -4. **One test per feature** — Avoid test proliferation -5. **Speed matters** — Reject slow, inefficient tests diff --git a/.agent/workflows/process/review-task.md b/.agent/workflows/process/review-task.md deleted file mode 100644 index 2bad9d994f..0000000000 --- a/.agent/workflows/process/review-task.md +++ /dev/null @@ -1,470 +0,0 @@ ---- -description: Review a specific [task] implementation from a [feature] following the systematic review workflow ---- -# Review Task Workflow - -You are reviewing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The review request comes from `current-task.json`. The CLI passes only the task title as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. 
-- **Standalone mode**: Review request is passed as command arguments `{{{title}}}`. Read changed files from `git status` or user-provided list. - -## Review Principles - -**Devil's Advocate Mindset**: Your job is to validate the engineer's work by actively searching for problems. Look for inconsistencies, deviations, and potential issues. - -**Zero Tolerance**: ALL findings must be fixed, regardless of severity. Never dismiss issues as "minor" or "not worth fixing". Every deviation from rules or established patterns must be addressed. - -**Evidence-Based Reviews**: Every finding must be backed by: -1. Explicit rules from `.agent/rules/` files, OR -2. Established patterns found elsewhere in the codebase (cite specific file:line examples), OR -3. Well-established ecosystem conventions (e.g., .NET interfaces prefixed with `I`) - -Avoid subjective personal preferences. - -**Line-by-Line Review**: Like GitHub PR reviews - comment ONLY on specific file:line combinations that have issues. NO comments on correct code. NO commentary on what was done well. - -**Objective Language**: State facts about rule violations or pattern deviations. Reference specific rules or codebase examples. Avoid subjective evaluations or praise. - -**Concise Communication**: Minimize token usage for the engineer. Focus only on what needs fixing. - ---- - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.agent/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path (contains engineer's request message) - - `responseFilePath`: Response file path (where you'll write your review outcome) - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task being reviewed, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read all files referenced in the engineer's request** (implementation details, changed files, etc.). - -5. **Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [feature] and [task] to understand requirements", "status": "pending", "activeForm": "Reading feature and task"}, - {"content": "Create checklist of all requirements from [task] description", "status": "pending", "activeForm": "Creating requirements checklist"}, - {"content": "Run validation tools in parallel (format, test, inspect)", "status": "pending", "activeForm": "Running validation tools"}, - {"content": "Verify translations (frontend-reviewer only)", "status": "pending", "activeForm": "Verifying translations"}, - {"content": "Test in browser with zero tolerance (frontend-reviewer only)", "status": "pending", "activeForm": "Testing in browser"}, - {"content": "Review changed files one-by-one", "status": "pending", "activeForm": "Reviewing files"}, - {"content": "Review high-level architecture", "status": "pending", "activeForm": "Reviewing architecture"}, - {"content": "Verify all requirements met with tests", "status": "pending", "activeForm": "Verifying requirements"}, - {"content": "If approved, commit changes (or reject if any issues found)", "status": "pending", "activeForm": "Committing changes or rejecting"}, - {"content": "Update [task] status to 
[Completed] or [Active]", "status": "pending", "activeForm": "Updating task status"}, - {"content": "MANDATORY: Call CompleteWork", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - -**After creating this template**: Remove todo items marked for a different reviewer role. For example, if you're a backend-reviewer, remove items containing "(frontend-reviewer only)". - -**After creating base todo, unfold "Review changed files one-by-one":** - -1. Get list of changed files from engineer's request (NOT from git status). -2. Replace the single "Review changed files" item with individual file review items. -3. Use tree format (├─ and └─). - -**Example:** -``` -Review changed files one-by-one -├─ Read and review User.cs [pending] -├─ Read and review UserRepository.cs [pending] -├─ Read and review CreateUserCommand.cs [pending] -└─ Read and review UsersEndpoint.cs [pending] -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [feature] and [task] to understand requirements - -1. **Read the [feature]** from `featureId` in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem and solution approach. - -2. **Read the [task]** from `taskId` in [PRODUCT_MANAGEMENT_TOOL]: - - Read the task description carefully. - - Note all subtask bullets (implementation steps). - -3. **Read engineer's request and response files** to understand what was actually implemented. - -**If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and reject the review explaining the task could not be found. - -**STEP 2**: Create checklist of all requirements from [task] description - -Extract ALL business rules, edge cases, and validations from task description: - - What are the business rules? (uniqueness, permissions, constraints). - - What validations are required? - - What edge cases must be handled? - - What should NOT be allowed? - - What are the tenant isolation requirements? 
- -**Example requirements checklist (focus on details, not obvious structure):** -``` -Business rules and validations: -- [ ] Email must be unique within tenant (not globally). -- [ ] Email validation (valid format). -- [ ] Only Tenant Owners can create users. -- [ ] Full name max length ≤ 100 characters. -- [ ] Cannot delete last Owner in tenant. -- [ ] Soft delete (not hard delete). -- [ ] Tenant isolation (users scoped to tenant). -- [ ] Max 3 tenant owners on a tenant. -... - -Edge cases and error handling: -- [ ] Test duplicate email rejection. -- [ ] Test invalid email format. -- [ ] Test non-owner attempting create (403 Forbidden). -- [ ] Test deleting last owner (should fail). -- [ ] Test name > 100 chars validation. -- [ ] Test creating user in different tenant (isolation). -... -``` - -This checklist focuses on non-obvious requirements that reviewers often miss. - -4. **Read engineer's request and response files** to understand what was actually implemented. - -The [feature] plan was AI-generated by tech-lead in a few minutes after interviewing the user. Engineers spend implementation time considering the code carefully. You are the expert reviewer. If implementation or task design doesn't align with: -- Feature intent. -- Rules in the project. -- Patterns used in the solution. -- Architectural patterns. -- Best practices. -- Simpler approaches. - -**Reject and provide guidance.** Better ideas from review phase should surface. - -**Collaborate with your team**: For complex problems or design questions, engage in conversation with engineers or other reviewers. Better solutions often emerge from team collaboration. - -**STEP 3**: Run validation tools - -**Zero tolerance for issues**: -- We deploy to production after review - quality is non-negotiable. -- **Boy Scout Rule**: The codebase must be cleaner than before. -- Reject if any failures, warnings, or problems exist anywhere in the system. 
-- This includes pre-existing issues unrelated to engineer's changes. -- Don't approve code with outstanding issues. -- Infrastructure failures (MCP errors, tools fail) → Reject, report problem, do not approve. - -**Inspect findings block merging**: If inspect returns "Issues found", the CI pipeline will fail and the code cannot be merged. The severity level (note/warning/error) is irrelevant - all findings must be fixed before approval. - -**For backend-reviewer** (validates all self-contained systems to catch cross-self-contained-system breakage): - -1. Run **build**, **format**, **test**, **inspect** following the global tool execution instructions. - -2. Handle validation results: - - **If NO parallel work notification in request**: REJECT if ANY failures found (zero tolerance). - - **If parallel work notification present** (e.g., "⚠️ Parallel Work: Frontend-engineer..."): - - REJECT if backend failures found (Core/, Api/, Tests/, Database/). - - IGNORE frontend failures (WebApp/) unless caused by backend API contract changes. - - If frontend failures seem related to backend API changes: Note in rejection that frontend-engineer may need to adapt. - -**For frontend-reviewer** (validates frontend only): - -1. Run **build**, **format**, **inspect** for frontend following the global tool execution instructions. - -2. Handle validation results: - - **If NO parallel work notification in request**: REJECT if ANY failures found (zero tolerance). - - **If parallel work notification present** (e.g., "⚠️ Parallel Work: Backend-engineer..."): - - REJECT if frontend failures found (WebApp/). - - IGNORE backend failures (Core/, Api/, Tests/) unless caused by frontend breaking the API contract. - - If backend failures seem related to API integration: Note in rejection. - -**For qa-reviewer** (validates E2E tests): - -1. Run **build** for frontend, then run **end-to-end** tests following the global tool execution instructions. - -2. 
REJECT if ANY failures found (zero tolerance). - -**If validation fails with errors unrelated to engineer's changes**: -- Check `git log --oneline` for recent parallel engineer commits. -- If recent commits exist: Sleep 5 minutes, re-run validation. -- If issue persists: REJECT. Per Boy Scout Rule, the engineer is responsible for fixing ALL issues found, even pre-existing ones. - -**Note**: All architectural rules for your role are embedded in your system prompt and available for reference at all times. - -**STEP 4**: Verify translations (frontend-reviewer only) - -Check all `*.po` files for empty `msgstr ""` entries and inconsistent domain terminology. Reject if translations are missing or terminology differs from established usage elsewhere. - -**STEP 5**: Test in browser with zero tolerance (frontend-reviewer only) - -**Required for frontend reviewers** - -If infrastructure issues prevent testing: Try to recover (use run MCP tool to restart server, retry browser). If recovery fails, complete the rest of your review, then reject with all findings including the infrastructure issue. Report problem for infrastructure failures. - -1. **Navigate to https://localhost:9000** and test ALL functionality: - - **Test the COMPLETE happy path** of the new feature from start to finish. - - **Test ALL edge cases**: validation errors, empty states, maximum values, special characters, boundary conditions. - - **Test user scenarios**: What would a user actually do with this feature? Try to break it. - - **Take screenshots** and critically examine if everything renders with expected layout and styling. - - Test in **dark mode** and **light mode** (switch theme and verify UI renders correctly). - - Test **localization** (switch language if feature has translatable strings). - - Test **responsive behavior**: mobile size, small browser, large browser (resize and verify layout adapts). - - Verify engineer documented what they tested - if not documented, REJECT. 
- - If website not responding, use **run** MCP tool to restart server. - -2. **Test with different user roles** (CRITICAL): - - Test as admin: `admin@platformplatform.local` / `UNLOCK`. - - **Test as non-admin user** if feature has role-based behavior. - - Verify permissions, access controls, and role-specific UI elements work correctly. - - REJECT if role-based features not tested with appropriate roles. - -3. **Monitor Network tab** - REJECT if ANY issues found: - - **Zero tolerance**: No failed requests, no 4xx/5xx errors. - - Check ALL API calls for the new feature execute successfully. - - No slow requests without explanation. - - REJECT if ANY network warnings or errors found (even pre-existing per Boy Scout rule). - - ✗ BAD: "500 error is backend problem" → REJECT ANYWAY. - - ✗ BAD: "Network error unrelated to my changes" → REJECT ANYWAY. - -4. **Monitor Console tab** - REJECT if ANY issues found: - - **Zero tolerance**: No console errors, no warnings. - - REJECT if ANY console errors or warnings found (even pre-existing per Boy Scout rule). - - Clear console and verify it stays clean during all interactions. - - ✗ BAD: "Warning unrelated to my code" → REJECT ANYWAY. - - ✗ BAD: "HMR error, not my problem" → REJECT ANYWAY. - -5. **Analyze screenshots for UI quality** (take screenshots of new UI): - - Check spacing, sizing, alignment, borders match design patterns. - - Verify responsive behavior (resize browser, test mobile viewport). - - Check color contrast, typography, visual hierarchy. - - REJECT if UI elements are misaligned, poorly spaced, or inconsistent. - - AI is bad at visual design - use your human judgment on screenshots. - -6. **Login instructions**: - - Username: `admin@platformplatform.local`. - - Use `UNLOCK` for verification code (works on localhost only). - - If user doesn't exist: Sign up for a new tenant, use `UNLOCK` for verification code. 
- -If you discover bugs during testing (API errors, broken functionality, console errors, network errors), reject. Zero tolerance means reject on any issue found. - -**Boy Scout Rule**: If you find pre-existing issues unrelated to engineer's changes, REJECT and require engineer to fix them. Zero tolerance means ZERO - not "only for my changes". - -**STEP 6**: Review changed files one-by-one - -**Review files individually, not in bulk:** - -For EACH file in your unfolded todo: -1. **Mark file [in_progress]** in todo. -2. **Read the ENTIRE file** using Read tool. -3. **Review line-by-line** against rules and patterns: - - Does it follow architectural patterns? (check similar files in codebase). - - Are there any rule violations or pattern deviations? - - Document findings: cite specific file:line + rule/pattern violated. -4. **Update todo item with result and mark [completed]**: - - If file has issues: Change to "Read and review FileName.cs (Issues found)". - - If file is clean: Change to "Read and review FileName.cs (Approved)". -5. **Move to next file**. - -**Example todo progression:** -``` -☒ ├─ Read and review TeamEndpoints.cs (Approved) -☒ ├─ Read and review CreateTeam.cs (Issues found) -☐ ├─ Read and review DeleteTeam.cs -``` - -**Why one-by-one:** -- Ensures thorough review of each file. -- Prevents missing details in bulk reviews. -- Critical for larger tasks. - -Play the devil's advocate, and reject if you find ANY small thing that is objectively not correct. - -**STEP 7**: Review high-level architecture - -After reviewing all individual files, step back and review the overall design: - -1. **Verify the implementation approach** makes sense: - - Are entities/aggregates designed correctly? - - Do commands/queries follow CQRS patterns? - - Are API contracts well-designed? - - Does the UI architecture follow patterns (frontend)? - -2. **Check cross-file consistency**: - - Do all pieces work together correctly? - - Are naming conventions consistent? 
- - Is the data flow logical? - -3. **Verify it solves the business problem**: - - Does this implementation actually deliver what the [task] requires? - - Are there simpler approaches? - -Play the devil's advocate, and reject if you find ANY small thing that is objectively not correct. - -**Update todo item:** -- Change to "Review high-level architecture (Approved)" or "(Issues found)". -- Mark as [completed]. - -**STEP 8**: Verify all requirements met with tests - -**Go through your requirements checklist from STEP 1 systematically:** - -For EACH business rule: -1. **Find the implementation** - Search the reviewed files for where this rule is enforced. -2. **Find the test** - Search test files for test covering this rule. -3. **Verify edge case coverage** - Does the test check boundary conditions, error paths? - -**For EACH validation:** -1. **Verify it exists** - Is the validation implemented? -2. **Verify error message** - Does it return proper error response? -3. **Verify test coverage** - Is there a test proving it rejects invalid input? - -**For EACH permission check:** -1. **Verify guard exists** - Is permission checked in command/endpoint? -2. **Verify correct roles** - Does it check the right role (Owner, Admin, Member)? -3. **Verify test coverage** - Is there a test proving unauthorized access is rejected (403)? - -If any requirement is missing, not implemented correctly, or not tested, reject with specific gaps. - -**Example verification:** -``` -Requirements verification: -✓ Email unique within tenant - Implemented in User.cs:45, tested in CreateUserTests.cs:120. -✗ Only Owners can create - No permission guard found in CreateUserCommand. -✗ Cannot delete last Owner - Implementation exists in DeleteUserCommand.cs:67 but NO TEST. -✗ Tenant isolation - Tests only check happy path, missing test for cross-tenant access. - -REJECT: Missing permission guard for create. Missing test for last-owner protection. Missing tenant isolation test. 
-``` - -**Update todo item:** -- Change to "Verify all requirements met with tests (Approved)" or "(Requirements missing)". -- Mark as [completed]. - -**STEP 9**: If approved, commit changes (or reject if any issues found) - -**Aim for perfection, not "good enough".** - -By this point, you've already marked each file, architecture, and requirements as "(Approved)" or "(Issues found)". Now make the final decision: - -**APPROVED only if ALL criteria met:** -- ✓ All validation tools passed (build, format, test, inspect). -- ✓ Browser testing completed successfully (frontend only). -- ✓ Zero console errors or warnings. -- ✓ Zero network errors (no 4xx, no 5xx). -- ✓ No skipped mandatory steps for ANY reason. -- ✓ All code follows rules and patterns. -- ✓ Pre-existing issues fixed (Boy Scout Rule). -- ✓ All files marked "(Approved)". -- ✓ Architecture marked "(Approved)". -- ✓ Requirements marked "(Approved)". - -**Reject if any issue exists - no exceptions. Common rationalizations to avoid:** -- ✗ "Backend issue, not frontend problem" → Reject anyway. -- ✗ "Previous review verified it" → Reject anyway. -- ✗ "Validation tools passed" → Not enough if browser has errors. -- ✗ "Infrastructure/MCP issue" → Reject anyway, report problem. -- ✗ "Pre-existing problem" → Reject anyway per Boy Scout Rule. -- ✗ "It's just a warning" → Reject, zero means zero. - -**When rejecting:** Do full review first, then reject with ALL issues listed (avoid multiple rounds). Skip to STEP 9 to update status, then STEP 10 to call CompleteWork. - -**If APPROVED, proceed with commit:** - -1. Identify files to commit from review context: - - Run `git status --porcelain` to see all changed files - - Filter to YOUR scope only: - - **Backend reviewer**: Api/Core/Tests files + `*.Api.json` files (auto-generated, in WebApp folder) - - **Frontend reviewer**: WebApp files + `*.po` files (auto-generated) EXCEPT `*.Api.json` files -2. Stage files: `git add ` for each file -3. 
Commit: One line, imperative form, no description, no co-author -4. Get hash: `git rev-parse HEAD` - -Don't use `git add -A` or `git add .` - -**STEP 10**: Update [task] status to [Completed] or [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- If APPROVED: Update [task] status to [Completed]. -- If REJECTED: Update [task] status back to [Active]. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] status updates. - -**STEP 11**: Call CompleteWork - -Call MCP **CompleteWork** tool with `mode: "review"` - your session terminates after this call. - -**Categorized Feedback Required**: -Use category prefixes for all feedback: -- `[system]` - Workflow, MCP tools, agent coordination, message handling. -- `[requirements]` - Requirements clarity, acceptance criteria, task description. -- `[code]` - Code patterns, rules, architecture guidance. - -Examples: -- `[system] Validation tools reported stale results from previous run`. -- `[requirements] Engineer's file list didn't match git status - unclear which files were in scope`. -- `[code] Missing examples for implementing telemetry in this pattern`. - -**For APPROVED reviews**: -- Provide: `mode: "review"`. -- Provide: `commitHash` (from `git rev-parse HEAD` in STEP 8). -- Provide: `rejectReason` as null or empty string. -- Provide: `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes. - -**For REJECTED reviews**: -- Provide: `mode: "review"`. -- Provide: `commitHash` as null or empty string. -- Provide: `rejectReason` (sentence case, imperative mood). -- Provide: `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes. 
- ---- - -## Response Format Requirements - -When calling CompleteWork with `responseContent`: - -**For REJECTED reviews**: - -```markdown -[Short objective summary of why rejected - 1-2 sentences or short paragraph if more elaboration needed] - -## Issues - -### File.cs:Line -[Objective description of problem] -- **Rule/Pattern**: [Reference to .agent/rules/X.md or pattern from codebase] -- **Fix**: [Optional: Suggest specific change] - -### AnotherFile.cs:Line -[Objective description of problem] -- **Rule/Pattern**: [Reference] -- **Fix**: [Optional] -``` - -**For APPROVED reviews**: - -```markdown -[One sentence objective explanation of why approved, e.g., "Follows established patterns for X and complies with rules Y and Z"] -``` - -**Requirements**: -- Line-by-line review like GitHub PR. -- NO comments on correct code. -- NO subjective language ("excellent", "great", "well done"). -- NO dismissing issues as "minor" or "optional". -- Cite specific rules or codebase patterns. -- Keep responses concise to minimize token usage. - ---- - -## REMINDER: Use Exact TodoWrite JSON - -**✅ DO: Copy JSON from above**. - -**❌ DON'T: Create custom format**. diff --git a/.claude/rules/backend/api-tests.md b/.claude/rules/backend/api-tests.md index 206c9bcbb9..88922caeca 100644 --- a/.claude/rules/backend/api-tests.md +++ b/.claude/rules/backend/api-tests.md @@ -30,7 +30,7 @@ Guidelines for writing tests for the backend. By default, tests should test API 11. Avoid sharing fields between tests—prefer local constants or variables within each test method 12. Verify side effects like database changes and telemetry events 13. Always call `TelemetryEventsCollectorSpy.Reset()` as the last Arrange statement if API calls were used to set up state (to ensure only the events from the Act phase are verified) -14. Use the `Connection` property from `EndpointBaseTest` for test data—it provides a SQLite connection with: +14. 
Use the `Connection` property from `EndpointBaseTest` for test data—it provides a database connection with: - `Insert` to populate test data - `Update` to update test data - `Delete` to delete test data @@ -39,7 +39,7 @@ Guidelines for writing tests for the backend. By default, tests should test API 15. Never use Dapper for database operations in tests—this is the main reason for rejected tests 16. The `EndpointBaseTest` class provides: - Authenticated and anonymous HTTP clients - - In-memory SQLite database for test isolation + - In-memory database for test isolation - Service mocking with NSubstitute - Telemetry event collection - Proper test cleanup with the Dispose pattern @@ -54,7 +54,7 @@ Ensure consistent ordering, naming, spacing, and line breaks. When creating SQL public async Task CompleteLogin_WhenValid_ShouldCompleteLoginAndCreateTokens() { // Arrange - var (loginId, _) = await StartLogin(DatabaseSeeder.User1.Email); // ✅ DO: Use test helpers for setup + var (loginId, _) = await StartLogin(DatabaseSeeder.Tenant1Owner.Email); // ✅ DO: Use test helpers for setup var command = new CompleteLoginCommand(CorrectOneTimePassword); TelemetryEventsCollectorSpy.Reset(); // ✅ DO: Reset telemetry if API was called in Arrange @@ -63,7 +63,7 @@ public async Task CompleteLogin_WhenValid_ShouldCompleteLoginAndCreateTokens() // Assert await response.ShouldBeSuccessfulPostRequest(hasLocation: false); // ✅ DO: Use custom assertion helpers - Connection.ExecuteScalar("SELECT COUNT(*) FROM Logins WHERE Id = @id AND Completed = 1", new { id = loginId.ToString() }).Should().Be(1); // ✅ DO: Verify DB side effects + Connection.ExecuteScalar("SELECT COUNT(*) FROM email_logins WHERE id = @id AND completed = 1", new { id = loginId.ToString() }).Should().Be(1); // ✅ DO: Verify DB side effects TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); // ✅ DO: Verify telemetry TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("LoginStarted"); 
TelemetryEventsCollectorSpy.CollectedEvents[1].GetType().Name.Should().Be("LoginCompleted"); // ✅ DO: Verify the correct events were collected @@ -82,12 +82,12 @@ public async Task BadTest() private string InsertTestUser(string? email = null) { var userId = UserId.NewId().ToString(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", userId), - ("CreatedAt", TimeProvider.System.GetUtcNow().AddMinutes(-10)), // ✅ DO: Use TimeProvider for dates - ("ModifiedAt", null), - ("Email", email ?? Faker.Internet.Email()) + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", userId), + ("created_at", TimeProvider.System.GetUtcNow().AddMinutes(-10)), // ✅ DO: Use TimeProvider for dates + ("modified_at", null), + ("email", email ?? Faker.Internet.Email()) ]); return userId; } @@ -115,8 +115,8 @@ public class BadTestSetup connection.Open(); // Insert user // ❌ DON'T: Add comments - connection.Execute("INSERT INTO Users ..."); - connection.Execute("INSERT INTO Users (Email, Id, TenantId) VALUES (@Email, @Id, @TenantId)", new { Email = "test@example.com", Id = Guid.NewGuid(), TenantId = 1 }); // ❌ DON'T: Use Dapper Execute + connection.Execute("INSERT INTO users ..."); + connection.Execute("INSERT INTO users (email, id, tenant_id) VALUES (@Email, @Id, @TenantId)", new { Email = "test@example.com", Id = Guid.NewGuid(), TenantId = 1 }); // ❌ DON'T: Use Dapper Execute } } ``` diff --git a/.claude/rules/backend/backend.md b/.claude/rules/backend/backend.md index 5b13e20ffc..ad381987fb 100644 --- a/.claude/rules/backend/backend.md +++ b/.claude/rules/backend/backend.md @@ -34,7 +34,7 @@ Guidelines for C# backend development, including code style, naming, exceptions, // ✅ DO: Wrap to 2 lines when needed, but never 3, 4, or 5 lines var updatedLocale = Connection.ExecuteScalar( - "SELECT Locale FROM Users WHERE Id = @id", new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() } + "SELECT locale FROM users 
WHERE id = @id", new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() } ); // ❌ DON'T: Split method parameters across multiple lines when they fit before 120 chars diff --git a/.claude/rules/backend/database-migrations.md b/.claude/rules/backend/database-migrations.md index c9bbaa14b0..6c71b35612 100644 --- a/.claude/rules/backend/database-migrations.md +++ b/.claude/rules/backend/database-migrations.md @@ -1,41 +1,52 @@ --- paths: **/Database/Migrations/*.cs -description: Rules for creating database migrations +description: Rules for creating database migrations with PostgreSQL conventions --- # Database Migrations -Guidelines for creating database migrations. +Guidelines for creating database migrations using PostgreSQL conventions with snake_case naming. ## Implementation 1. Create migrations manually rather than using Entity Framework tooling: - Place migrations in `/[scs-name]/Core/Database/Migrations` - Name migration files with 14-digit timestamp prefix: `YYYYMMDDHHmmss_MigrationName.cs` - - Only implement the `Up` method—don't create `Down` migration + - Only implement the `Up` method--don't create `Down` migration 2. Follow this strict column ordering in table creation statements: - - `TenantId` (if applicable) - - `Id` (always required) + - `tenant_id` (if applicable) + - `id` (always required) - Foreign keys (if applicable) - - `CreatedAt` and `ModifiedAt` as non-nullable `datetimeoffset` + - `created_at` and `modified_at` as non-nullable/nullable `timestamptz` - All other properties in the same order as they appear in the C# Aggregate class -3. Use appropriate SQL Server data types: - - Use `varchar(32)` for strongly typed IDs (ULID is 26 chars + underscore + max 5-char prefix = exactly 32) - - Intelligently deduce varchar vs nvarchar based on property type, validators, enum values, etc. - - Use `datetimeoffset` (default), `datetime2` (timezone agnostic), or `date`—never `datetime` - - Default to `varchar(10)` or `varchar(20)` for enum values -4. 
Create appropriate constraints and indexes: - - Primary keys: `PK_TableName` - - Foreign keys: `FK_ChildTable_ParentTable_ColumnName` - - Indexes: `IX_TableName_ColumnName` - -5. Migrate existing data: - - Use `migrationBuilder.Sql("UPDATE [table] SET [column] = [value] WHERE [condition]")` with care -6. Use standard SQL Server naming conventions: - - Table names should be plural (e.g., `Users`, not `User`) - - Constraint and index names should follow the patterns above +3. Use snake_case naming for everything: + - Table names: plural, lowercase (e.g., `users`, `email_logins`, `stripe_events`) + - Column names: lowercase with underscores (e.g., `tenant_id`, `created_at`, `email_confirmed`) + - C# anonymous type members must also be snake_case (e.g., `tenant_id = table.Column(...)`) + - Constraint names: `pk_table_name`, `fk_child_table_parent_table_column_name`, `ix_table_name_column_name` + +4. Use appropriate PostgreSQL data types: + - Use `text` for all string columns--PostgreSQL stores `text`, `varchar`, and `varchar(N)` identically with no performance difference + - Use `timestamptz` for `DateTimeOffset` columns--never `timestamp`, `datetime`, or `datetimeoffset` + - Use `boolean` for bool properties + - Use `integer` for int properties + - Use `bigint` for long properties (e.g., `TenantId`) + - Use `numeric(18,2)` for decimal properties and always add `HasPrecision(18, 2)` in the EF Core configuration + - Use `jsonb` for all JSON columns--both `OwnsOne(..., b => b.ToJson())` and manually serialized collections/objects using `HasConversion` with `JsonSerializer`. When using `HasConversion`, also add `.HasColumnType("jsonb")` in the EF configuration so Npgsql sends the value as `jsonb` instead of `text` + - Enforce length constraints at the application level using FluentValidation, not at the database level + +5. For indexes on large tables, use `migrationBuilder.Sql("CREATE INDEX CONCURRENTLY ...")` to avoid locking the table during creation. 
This is safe because the CI/CD pipeline applies migrations with `ON_ERROR_STOP=1` without `--single-transaction`, and the generated script is idempotent. + +6. Create appropriate constraints and indexes: + - Primary keys: `pk_table_name` + - Foreign keys: `fk_child_table_parent_table_column_name` + - Indexes: `ix_table_name_column_name` + - Filtered indexes use PostgreSQL `WHERE` clause syntax (e.g., `filter: "deleted_at IS NULL"`) + +7. Migrate existing data: + - Use `migrationBuilder.Sql("UPDATE table_name SET column_name = value WHERE condition")` with care ## Examples @@ -49,55 +60,53 @@ public sealed class AddUserPreferences : Migration protected override void Up(MigrationBuilder migrationBuilder) { migrationBuilder.CreateTable( - "UserPreferences", + "user_preferences", // ✅ DO: Use snake_case plural table name table => new { - TenantId = table.Column("bigint", nullable: false), // ✅ DO: Add TenantId as first column - Id = table.Column("varchar(32)", nullable: false), // ✅ DO: Make Id varchar(32) by default - UserId = table.Column("varchar(32)", nullable: false), // ✅ DO: Add Foreginkey before CreatedAt/ModifiedAt - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - Language = table.Column("varchar(10)", nullable: false) // ✅ DO: Use varchar when colum has known values + tenant_id = table.Column("bigint", nullable: false), // ✅ DO: Add tenant_id as first column + id = table.Column("text", nullable: false), // ✅ DO: Use text for all string columns + user_id = table.Column("text", nullable: false), // ✅ DO: Add foreign key before created_at/modified_at + created_at = table.Column("timestamptz", nullable: false), // ✅ DO: Use timestamptz + modified_at = table.Column("timestamptz", nullable: true), + language = table.Column("text", nullable: false) // ✅ DO: Use text for string columns }, constraints: table => { - table.PrimaryKey("PK_UserPreferences", x => x.Id); - 
table.ForeignKey("FK_UserPreferences_Users_UserId", x => x.UserId, "Users", "Id"); + table.PrimaryKey("pk_user_preferences", x => x.id); // ✅ DO: Use pk_table_name + table.ForeignKey("fk_user_preferences_users_user_id", x => x.user_id, "users", "id"); // ✅ DO: Use fk_child_parent_column } ); - migrationBuilder.CreateIndex("IX_UserPreferences_TenantId", "UserPreferences", "TenantId"); - migrationBuilder.CreateIndex("IX_UserPreferences_UserId", "UserPreferences", "UserId"); + migrationBuilder.CreateIndex("ix_user_preferences_tenant_id", "user_preferences", "tenant_id"); // ✅ DO: Use ix_table_column + migrationBuilder.CreateIndex("ix_user_preferences_user_id", "user_preferences", "user_id"); } } -// ❌ DON'T: Forget to add the attribute [DbContext(typeof(XxxDbContext))] for the self-contained system +// ❌ DON'T: Forget to add the attribute [DbContext(typeof(XxxDbContext))] for the self-contained system [Migration("20250507_AddUserPrefs")] // ❌ Missing proper 14-digit timestamp public class AddUserPrefsMigration : Migration // ❌ Not sealed, incorrect naming, suffixed with Migration { protected override void Up(MigrationBuilder migrationBuilder) { - // Create UserPreferences table // ❌ DON'T: Add comments migrationBuilder.CreateTable( - "UserPreference", // ❌ DON'T: use singular name for table + "UserPreference", // ❌ DON'T: Use PascalCase or singular name for table table => new { - Id = table.Column("varchar(30)", nullable: false), // ❌ DON'T: Use varchar(30) for ULID - Theme = table.Column("varchar(20)", nullable: false), // ❌ DON'T: Add properties before CreatedAt/ModifiedAt - TenantId = table.Column("bigint", nullable: false), // ❌ TenantId should be first - CreatedAt = table.Column("datetimeoffset", nullable: false), + Id = table.Column("varchar(30)", nullable: false), // ❌ DON'T: Use PascalCase column names or varchar(N)--use text instead + Theme = table.Column("varchar(20)", nullable: false), // ❌ DON'T: Add properties before created_at/modified_at or use 
varchar(N) + TenantId = table.Column("bigint", nullable: false), // ❌ tenant_id should be first, and snake_case + CreatedAt = table.Column("datetimeoffset", nullable: false), // ❌ DON'T: Use SQL Server types ModifiedAt = table.Column("datetime", nullable: true), // ❌ DON'T: Use datetime - UserId = table.Column("varchar(32)", nullable: false), // ❌ Foreign key after CreatedAt/ModifiedAt - Language = table.Column("varchar(10)", nullable: false), // ❌ Trailing comma + UserId = table.Column("varchar(32)", nullable: false), // ❌ Foreign key after created_at/modified_at, use text not varchar }, constraints: table => { - table.PrimaryKey("PrimaryKey_UserPreference", i => i.Id); // ❌ Incorrect PK naming, variable should be x not i - table.ForeignKey("ForeignKey_UserPreference_User", x => x.UserId, "Users", "Id"); // ❌ Incorrect FK naming + table.PrimaryKey("PK_UserPreference", i => i.Id); // ❌ PascalCase PK naming, variable should be x not i + table.ForeignKey("FK_UserPreference_User", x => x.UserId, "Users", "Id"); // ❌ PascalCase FK naming } ); } - + protected override void Down(MigrationBuilder migrationBuilder) // ❌ DON'T: Create a down method { migrationBuilder.DropTable("UserPreference"); @@ -105,20 +114,29 @@ public class AddUserPrefsMigration : Migration // ❌ Not sealed, incorrect nam } ``` -### Example 2 - Determining column sizes from validators +### Example 2 - Filtered indexes and data migrations + +```csharp +// ✅ DO: Use PostgreSQL WHERE clause syntax for filtered indexes +migrationBuilder.CreateIndex("ix_users_tenant_id_email", "users", ["tenant_id", "email"], unique: true, filter: "deleted_at IS NULL"); +migrationBuilder.CreateIndex("ix_subscriptions_stripe_customer_id", "subscriptions", "stripe_customer_id", unique: true, filter: "stripe_customer_id IS NOT NULL"); + +// ❌ DON'T: Use SQL Server bracket notation +migrationBuilder.CreateIndex("IX_Users_TenantId_Email", "Users", ["TenantId", "Email"], unique: true, filter: "[DeletedAt] IS NULL"); +``` 
```csharp +// ✅ DO: Use text for string columns, enforce length in validators public sealed class UpdateUserValidator : AbstractValidator { public UpdateUserValidator() { - RuleFor(x => x.TimeZone).NotEmpty().MaximumLength(50); // ✅ DO: Use column sizes based on command validators + RuleFor(x => x.TimeZone).NotEmpty().MaximumLength(50); } } protected override void Up(MigrationBuilder migrationBuilder) { - migrationBuilder.AddColumn("TimeZone", "Users", "varchar(50)", nullable: false, defaultValue: "UTC"); // ✅ DO: Match column size to validator - // ✅ DO: Consider running complex logic here to update existing records + migrationBuilder.AddColumn("time_zone", "users", "text", nullable: false, defaultValue: "UTC"); } ``` diff --git a/.claude/rules/backend/domain-modeling.md b/.claude/rules/backend/domain-modeling.md index d3a14b6783..be1fb8f19d 100644 --- a/.claude/rules/backend/domain-modeling.md +++ b/.claude/rules/backend/domain-modeling.md @@ -132,9 +132,9 @@ public sealed class InvoiceConfiguration : IEntityTypeConfiguration builder.OwnsOne(i => i.Address, b => b.ToJson()); // ✅ DO: Map 1:1 valueobjects and entites with .ToJson() - // ✅ DO: Map collection with custom JsonSerializer + // ✅ DO: Map collection with custom JsonSerializer and HasColumnType("jsonb") builder.Property(i => i.InvoiceLines) - .HasColumnName("InvoiceLines") + .HasColumnType("jsonb") .HasConversion( v => JsonSerializer.Serialize(v.ToArray(), JsonSerializerOptions), v => JsonSerializer.Deserialize>(v, JsonSerializerOptions) diff --git a/.claude/rules/backend/repositories.md b/.claude/rules/backend/repositories.md index 6396635bc9..1f9d4872a8 100644 --- a/.claude/rules/backend/repositories.md +++ b/.claude/rules/backend/repositories.md @@ -32,6 +32,7 @@ Guidelines for implementing DDD repositories in the backend, including structure 14. Never do N+1 queries 15. Don't register repositories in DI—SharedKernel registers them automatically 16. 
Don't add DbSets to DbContext—RepositoryBase handles this automatically +17. Add `.OrderBy(e => e.Id)` when caller depends on order (`[0]`, `.First()`, pagination)—ULIDs are chronological ## Examples diff --git a/.claude/rules/backend/strongly-typed-ids.md b/.claude/rules/backend/strongly-typed-ids.md index 2bc0e2ad23..77da582daf 100644 --- a/.claude/rules/backend/strongly-typed-ids.md +++ b/.claude/rules/backend/strongly-typed-ids.md @@ -11,7 +11,7 @@ Guidelines for implementing strongly typed IDs in the backend, covering type saf 1. Use strongly typed IDs to provide type safety and prevent mixing different ID types, improving readability and maintainability 2. By default, use `StronglyTypedUlid` as the base class—it provides chronological ordering and includes a prefix for easy recognition (e.g., `usr_01JMVAW4T4320KJ3A7EJMCG8R0`) -3. Use the `[IdPrefix]` attribute with a short prefix (max 5 characters)—ULIDs are 26 chars, plus 5-char prefix and underscore = 32 chars for varchar(32) +3. Use the `[IdPrefix]` attribute with a short prefix (max 5 characters)—ULIDs are 26 chars, plus 5-char prefix and underscore = 32 chars max 4. Follow the naming convention `[Entity]Id` 5. Include the `[JsonConverter]` attribute for proper serialization 6. Always override `ToString()` in the concrete class—record types don't inherit this from the base class diff --git a/.cursor/rules/workflows/modes/agentic-workflow.mdc b/.cursor/rules/workflows/modes/agentic-workflow.mdc deleted file mode 100644 index f03494c4fe..0000000000 --- a/.cursor/rules/workflows/modes/agentic-workflow.mdc +++ /dev/null @@ -1,480 +0,0 @@ ---- -description: Load comprehensive knowledge about the PlatformPlatform agentic workflow system -globs: -alwaysApply: false ---- -# Agentic Workflow System Knowledge - -You now have complete knowledge of the agentic workflow system used in this codebase. 
- -## System Architecture Overview - -**Core Concept**: Hierarchical AI agent system where coordinator delegates to engineers, engineers delegate to reviewers. All agents run as interactive worker-hosts that communicate via request/response files in a shared messages directory. - -**Agent Hierarchy**: -``` -Tech Lead (creates PRDs) - └─→ Hands off to Coordinator - -Coordinator (orchestrates implementation) - ├─→ Backend Engineer → Backend Reviewer (commits) - ├─→ Frontend Engineer → Frontend Reviewer (commits) - └─→ QA Engineer → QA Reviewer (commits) -``` - -**Process Structure**: Each agent type has two processes: -- **Worker-host** (C# CLI): Manages lifecycle, file watching, launches Claude Code workers -- **Worker agent** (Claude Code): Does actual AI work, uses MCP tools, self-destructs when done - -**Key Mechanisms**: -- Session management: Explicit GUIDs in `.claude-session-id` files -- Request detection: FileSystemWatcher monitors `*.{agentType}.request.*.mdc` files -- Process monitoring: Inactivity detection (20-62 min), restart logic (max 2 restarts) -- Task recovery: Prompts user to continue incomplete tasks on startup - -## Terminology Standards - -**The PlatformPlatform workflow is tool-agnostic**. Users can switch between Linear, AzureDevOps, Jira, or even markdown files by simply changing `[PRODUCT_MANAGEMENT_TOOL]` in AGENTS.md. 
- -### Use These Standardized Terms - -When writing or updating `.cursor/rules/workflows/process/**` files, system prompts, agent definitions, or any workflow documentation: - -**Work Item Hierarchy**: -- `[feature]` / `[features]` or `[Feature]` / `[Features]` — a collection of tasks -- `[task]` / `[tasks]` or `[Task]` / `[Tasks]` — a complete vertical slice implementation unit -- `[subtask]` / `[subtasks]` or `[Subtask]` / `[Subtasks]` — bullet points in task descriptions (not tracked separately) - -**Status Flow**: -- For [Feature]: `[Planned]` → `[Active]` → `[Resolved]` -- For [Task]: `[Planned]` → `[Active]` → `[Review]` → `[Completed]` -- For [Subtask]: No status (just bullets in description) - -Use capitalized forms (`[Feature]`, `[Task]`, `[Subtask]`) when it reads more naturally in sentences. - -### Don't Use Tool-Specific Terms - -**Forbidden terms** (these are specific to certain tools): -- ❌ Issue, Issues -- ❌ User Story, User Stories -- ❌ Epic, Epics -- ❌ Work Item, Work Items -- ❌ Ticket, Tickets -- ❌ Bug, Bugs (unless specifically referring to a defect type) -- ❌ Tool-specific status names (New, Doing, Done, In Progress, Resolved, Closed, etc.) 
- -**Why this matters**: -- Linear uses "Issue" for everything -- AzureDevOps uses "Work Item" with types (Epic, Feature, User Story, Task, Bug) -- Jira uses "Issue" with types (Epic, Story, Task, Sub-task) -- Our workflow must work with ALL of these tools without modification - -### Mapping Examples - -When the workflow runs, the underlying tool maps our generic terms: - -**AzureDevOps**: -- `[feature]` → User Story work item type -- `[task]` → Task work item type -- `[subtask]` → Bullet point in task description -- For [Feature]: `[Planned]` → New, `[Active]` → Active, `[Resolved]` → Resolved -- For [Task]: `[Planned]` → New, `[Active]` → Active, `[Review]` → Resolved, `[Completed]` → Closed - -**Linear**: -- `[feature]` → Project -- `[task]` → Issue -- `[subtask]` → Bullet point in task description -- For [Feature]: `[Planned]` → Todo, `[Active]` → In Progress, `[Resolved]` → In Progress -- For [Task]: `[Planned]` → Todo, `[Active]` → In Progress, `[Review]` → In Review, `[Completed]` → Done - -**The workflow code handles these mappings**—your job is to use ONLY the standardized terms in all documentation. - -### When Updating Workflow Files - -**Before making changes to `.cursor/rules/workflows/process/**` files**: - -1. Search for tool-specific terms and replace with standardized terms -2. Verify status flows use only `[Planned]` → `[Active]` → `[Review]` → `[Completed]` -3. Never add hints or examples that reference specific tool terminology -4. Use `[PRODUCT_MANAGEMENT_TOOL]` placeholder when referring to the tool itself - -**Example - GOOD**: -```markdown -1. Retrieve the [Feature] from [PRODUCT_MANAGEMENT_TOOL] -2. Load all [Tasks] from the [Feature] -3. Move the [Feature] to [Active] status -4. For each [Task], implement and move to [Review] -``` - -**Example - BAD**: -```markdown -1. Retrieve the Feature from AzureDevOps (or Project from Linear) -2. Break down the Feature into User Stories, then into Tasks -3. Move the Feature to "In Progress" status -4. 
For each task, implement and move to "Code Review" -``` - -## Workspace Structure - -Agent workspaces are organized based on whether agents are branch-specific or branch-agnostic: - -**Branch-agnostic agents** (pair-programmer, tech-lead): -``` -.workspace/agent-workspaces/ -├── pair-programmer/ # Branch-agnostic workspace -│ ├── .host-process-id # Worker-host PID -│ ├── .worker-process-id # Claude Code PID (when active) -│ ├── .claude-session-id # Session GUID for persistence -│ ├── current-task.json # Active task metadata -│ ├── *.claude-session-id # Saved sessions -│ ├── logs/ # Workflow event logs -│ │ └── developer-cli-{date}.log -│ └── feedback-reports/ # Problem reports -│ ├── problems/ # Open issues -│ │ └── {timestamp}-{severity}-{slug}.md -│ └── done/ # Resolved issues -└── tech-lead/ # Branch-agnostic workspace - └── (same structure as pair-programmer) -``` - -**Branch-specific agents** (coordinator, engineers, reviewers): -``` -.workspace/agent-workspaces/{branch}/ -├── messages/ # Shared request/response files -│ ├── .task-counter # Increments for each task (0001, 0002, etc.) 
-│ ├── NNNN.{agent}.request.{slug}.md # Delegation requests -│ └── NNNN.{agent}.response.{slug}.md # Agent responses -├── {agent-type}/ # Per-agent workspace -│ ├── .host-process-id # Worker-host PID -│ ├── .worker-process-id # Claude Code PID (when active) -│ ├── .claude-session-id # Session GUID for persistence -│ └── current-task.json # Active task metadata -├── developer-cli-{date}.log # Workflow event logs (at branch root) -└── feedback-reports/ # Problem reports from agents - ├── problems/ # Open issues (YAML frontmatter) - │ └── {timestamp}-{severity}-{slug}.md - └── done/ # Resolved issues -``` - -## Agent Types and Responsibilities - -### Tech Lead (`tech-lead`) -- Creates PRDs and defines features -- Conducts research and discovery -- Defines what to build, not how -- Doesn't implement features or delegate to engineers -- Hands off to coordinator for implementation -- Runs continuously, relaunching after each session ends -- Auto-launches immediately when started - -### Coordinator (`coordinator`) -- Orchestrates feature implementation -- Delegates tasks to engineers via Task tool (proxy agents) -- Monitors progress through response files -- Doesn't code or commit -- Runs continuously, relaunching after each session ends -- Auto-launches immediately when started -- Prompts user to select feature, then runs `/process:implement-feature` - -### Engineers (`backend-engineer`, `frontend-engineer`, `qa-engineer`) -- Implement code within their specialty (backend: Core/Api/Tests, frontend: WebApp, qa: e2e tests) -- Run tests and validation tools -- Delegate to their corresponding reviewer for approval -- Iterate on reviewer feedback until approved -- Session persists across tasks (via `.claude-session-id`) -- Wait for MCP delegation from coordinator - -### Reviewers (`backend-reviewer`, `frontend-reviewer`, `qa-reviewer`) -- Review code quality, architecture, and adherence to rules -- Run validation tools (build, test, format) -- **Commit approved code** and 
provide commit hash -- Reject with detailed feedback if issues found -- Return control to engineer (via response file) -- Wait for MCP delegation from their engineer - -### Pair Programmer (`pair-programmer`) -- General-purpose engineer for direct user collaboration -- Can work on any code (no boundaries) -- Auto-launches immediately when started -- User steers work directly through conversation -- Commits directly for workflow/system fixes - -## Communication Protocol - -### Request/Response Pattern - -**Request file format** (`NNNN.{agent}.request.{slug}.md`): -```yaml ---- -from: {sender-agent-type} -to: {target-agent-type} -request-number: NNNN -timestamp: 2025-11-01T14:30:00+01:00 -feature-id: {feature-id-from-PRODUCT_MANAGEMENT_TOOL} -task-id: {task-id-from-PRODUCT_MANAGEMENT_TOOL} ---- - -[Markdown content with task description] -``` - -**Response file format** (`NNNN.{agent}.response.{slug}.md`): -```markdown -[Agent's response after completing work] -``` - -### Delegation Flow - -1. **Coordinator → Engineer**: - - Coordinator creates request file via Task tool → proxy agent → MCP `start_worker_agent` - - Engineer's worker-host detects file via FileSystemWatcher - - Engineer launches Claude Code worker with `/process:implement-task` slash command - - Engineer implements code, runs tests - -2. **Engineer → Reviewer**: - - Engineer creates request file via MCP `start_worker_agent` - - Reviewer's worker-host detects file - - Reviewer launches with `/process:review-task` slash command - - Reviewer validates and either approves (commits) or rejects - -3. **If Rejected**: - - Reviewer writes response with rejection reason - - Engineer receives response, fixes issues - - Engineer delegates to reviewer again (loop continues) - -4. **If Approved**: - - Reviewer commits code, writes response with commit hash - - Engineer receives response with commit confirmation - - Engineer completes task, writes response to coordinator - -5. 
**Coordinator Receives Completion**: - - Coordinator gets response from engineer - - Coordinator proceeds to next task - -## Problem Reports System - -### Reading Problem Reports - -Agents create problem reports when encountering workflow/system bugs (NOT feature bugs). - -**Location**: -- Branch-specific agents: `.workspace/agent-workspaces/{branch}/feedback-reports/problems/` -- Branch-agnostic agents: `.workspace/agent-workspaces/{agentType}/feedback-reports/problems/` - -**YAML Frontmatter Format**: -```yaml ---- -report-id: HH-MM-SS-{severity}-{slug} -timestamp: 2025-11-01T14:30:00+01:00 -reporter: {agent-type} -severity: error|warning|info -location: {file-path-or-context} -status: open|resolved ---- - -# Problem Title - -## Description -[Detailed description of the workflow/system bug] - -## What Happened -[Specific sequence of events] - -## Root Cause -[Analysis of why it happened] - -## Suggested Fix -[Recommendations for fixing] -``` - -### Processing Problem Reports - -When working on problem reports: - -1. **Read reports** with `status: open` -2. **Prioritize**: error > warning > info -3. **Analyze**: Read affected files to understand root cause -4. **Fix**: Make targeted changes (system prompts, MCP tools, workflow code, agent definitions) -5. **Validate**: Run appropriate tools (build, test, format) -6. **Commit**: Descriptive message, optionally reference report filename -7. 
**Move**: Move report file from `problems/` to the sibling `done/` directory (`feedback-reports/done/`)
-
-**Example workflow**:
-```bash
-# Read problem
-Read: .workspace/agent-workspaces/cto/feedback-reports/problems/14-30-00-error-mcp-tool-fails.md
-
-# Fix the issue
-Edit: developer-cli/Commands/McpCommand.cs
-
-# Validate
-Use: mcp__developer-cli__execute_command (command: "build", backend: true)
-
-# Commit
-git add developer-cli/Commands/McpCommand.cs
-git commit -m "Fix MCP tool parameter validation"
-
-# Move to done
-mv .workspace/agent-workspaces/cto/feedback-reports/problems/14-30-00-error-mcp-tool-fails.md \
-   .workspace/agent-workspaces/cto/feedback-reports/done/
-```
-
-### Types of Problems
-
-**Report these** (workflow/system bugs):
-- MCP tool errors or incorrect parameters
-- System prompt contradictions or missing guidance
-- Agent communication failures or message format issues
-- Workflow file paths that don't exist
-- Agent definitions with wrong tool permissions
-- Slash commands with incorrect instructions
-
-**Don't report** (feature/implementation issues):
-- Business logic bugs
-- Missing product features
-- Code quality problems in production code
-- Unclear product requirements
-- Your own implementation bugs
-
-## Session Management
-
-### Session Persistence
-
-**`.claude-session-id` file**:
-- Contains: GUID for explicit session tracking
-- Created: Before first agent launch (if not exists)
-- Used: To resume sessions with `claude --resume {guid}`
-- Never deleted: Enables conversation continuity across tasks
-- Shared across tasks: Same session ID used for all tasks in a [feature]
-
-### Memory Reset
-
-**When to reset**:
-- Starting a new task (fresh context needed)
-- Agent stuck or producing poor quality work
-- Coordinator triggers via MCP with `resetMemory: true`
-
-**How to reset**:
-- Delete `.claude-session-id` file
-- Next launch creates new session
-- Memory reset cascades from engineer to reviewer automatically
-
-## Commit Permissions and Protocols
-
-### Who Can Commit - -- ✅ **Reviewers**: Always commit approved code (their primary job) -- ✅ **Pair-programmer**: Can commit directly for workflow/system fixes -- ❌ **Engineers**: Never commit (must go through reviewer) -- ❌ **Tech-lead**: Never commits -- ❌ **Coordinator**: Never commits - -### Commit Protocol - -**Standard process** (engineers): -1. Implement code -2. Delegate to reviewer -3. Reviewer commits if approved -4. Engineer never commits directly - -**For workflow/system fixes** (pair-programmer): -1. Make changes to system prompts, agent definitions, MCP tools, etc. -2. Run validation (build, test, format as appropriate) -3. Commit directly with descriptive message -4. Move problem report to done/ if applicable - -**Commit message format**: -- Imperative mood, capital letter, no ending punctuation -- Single line, concise description + motivation -- Examples: - - "Fix MCP tool parameter validation for reviewer agents" - - "Add task scope guidance to all engineer system prompts" - - "Sanitize task titles to handle forward slashes in filenames" - -## Logs and Monitoring - -### Workflow Event Logs - -**Location**: -- Branch-specific agents: `.workspace/agent-workspaces/{branch}/developer-cli-{date}.log` -- Branch-agnostic agents: `.workspace/agent-workspaces/{agentType}/logs/developer-cli-{date}.log` - -**Format**: -``` -[2025-11-01 14:30:00] [0001.backend-engineer.request] Started: 'Create API endpoints' -[2025-11-01 14:45:00] [0001.backend-engineer.response] Completed: 'API endpoints implemented' -[2025-11-01 14:46:00] [0002.backend-reviewer.request] Started: 'Review the work' -[2025-11-01 14:50:00] [0002.backend-reviewer.response] Approved with commit: abc123def -``` - -**Use logs to**: -- Understand task flow and timing -- Debug delegation issues -- Track agent activity and progress -- Identify performance bottlenecks - -## Key Implementation Files - -Understanding these files helps debug workflow issues: - -- 
`developer-cli/Commands/ClaudeAgentCommand.cs` — Worker-host lifecycle, session management, process monitoring -- `developer-cli/Commands/McpCommand.cs` — MCP server exposing `start_worker_agent` tool -- `developer-cli/Utilities/ClaudeAgentLifecycle.cs` — Worker completion logic, file creation -- `.cursor/agentic-workflow/system-prompts/*.txt` — Agent behavior and rules -- `.cursor/agents/*.mdc` — Agent definitions for Task tool (proxy agents) -- `.cursor/rules/workflows/**/*.mdc` — Slash command workflows - -## Best Practices - -### For Problem Reports -1. Always process in severity order (error → warning → info) -2. Read ALL related files before making changes -3. Make targeted, minimal fixes (no scope creep) -4. Test changes appropriately -5. Move reports to done/ after resolving - -### For Commits -1. One logical change per commit -2. Descriptive messages following repo conventions -3. Never commit without user permission (check CLAUDE.md) -4. Reference problem report IDs when applicable - -### For System Prompts -1. Keep concise, avoid redundancy -2. Follow established patterns across agents -3. Use standardized terminology: `[feature]`, `[task]`, `[subtask]`, `[Planned]`, `[Active]`, `[Review]`, `[Resolved]`, `[Completed]` -4. Don't use tool-specific terms (Issue, User Story, Epic, Work Item, etc.) -5. Be token-efficient (agents read these on every launch) - -### For Workflow Files (.cursor/rules/workflows/process/**) -1. **Before editing**: Review the "Terminology Standards" section above -2. Use ONLY standardized terms: `[feature]`, `[task]`, `[subtask]` -3. Use ONLY standardized statuses: `[Planned]`, `[Active]`, `[Review]`, `[Resolved]`, `[Completed]` -4. Replace any tool-specific terms found (Issue, User Story, Epic, Work Item, etc.) -5. Use `[PRODUCT_MANAGEMENT_TOOL]` when referring to the tool itself -6. Never include tool-specific examples or hints in parentheses -7. Keep the workflow completely tool-agnostic - -### For Validation -1. 
Always run appropriate tools after changes:
-   - Modified .cs files: build, format, test, inspect
-   - Modified system prompts: check for contradictions
-   - Modified agent definitions: validate YAML frontmatter
-   - Modified workflow files: verify no tool-specific terms exist
-
-### For Workspace Cleanliness
-1. Move resolved reports to done/
-2. Keep problems/ directory clean
-3. Archive old message files periodically (manual process)
-4. Monitor log file size
-
----
-
-You now have complete knowledge of the agentic workflow system. Use this knowledge to:
-- **Maintain tool-agnostic terminology** in ALL workflow documentation
-- Work effectively with problem reports
-- Understand agent communication patterns
-- Make workflow improvements
-- Debug delegation issues
-- Process system bugs efficiently
-
-**Remember**: The workflow's portability across different product management tools depends on strict adherence to standardized terminology—always use `[feature]`, `[task]`, `[subtask]` and status flows: For [Feature]: `[Planned]` → `[Active]` → `[Resolved]`. For [Task]: `[Planned]` → `[Active]` → `[Review]` → `[Completed]`. Never use tool-specific terms.
diff --git a/.cursor/rules/workflows/modes/coordinator.mdc b/.cursor/rules/workflows/modes/coordinator.mdc
deleted file mode 100644
index 06cc502cbb..0000000000
--- a/.cursor/rules/workflows/modes/coordinator.mdc
+++ /dev/null
@@ -1,23 +0,0 @@
----
-description: Workflow for coordinating feature implementation by delegating tasks to specialized engineers
-globs:
-alwaysApply: false
----
-# Coordinator Mode
-
-You are a Coordinator who orchestrates feature implementation by delegating tasks to specialized engineers (backend, frontend, QA). You don't implement code yourself - you coordinate and track progress.
-
-## What You Do
-
-Coordinate implementation of features from start to finish using `/process:implement-feature`.
-
-See that command for full workflow details.
- -## What You DON'T Do - -- Create PRDs (that's tech-lead's job) -- Write code or commit -- Use developer_cli MCP tools directly -- Implement or review anything yourself - -Your only job: Load features, delegate tasks, track completion. diff --git a/.cursor/rules/workflows/modes/tech-lead.mdc b/.cursor/rules/workflows/modes/tech-lead.mdc deleted file mode 100644 index 3238ed306b..0000000000 --- a/.cursor/rules/workflows/modes/tech-lead.mdc +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: Activate tech lead mode for product discovery and PRD creation -globs: -alwaysApply: false ---- -# Tech Lead Mode - -You are a Tech Lead focused on product discovery, research, and PRD creation. You don't implement code yourself - that's the coordinator's job. - -## What You Can Do - -### 1. Product Planning and Discovery -Create PRDs and feature descriptions using: -- WebSearch, Perplexity, Context7, etc. for research -- Read for exploring codebase -- Linear MCP tools for exploring existing features -- Available commands: - - `/process:create-prd` - Create a PRD defining a [feature] with all [tasks] - -After creating a PRD and tasks in [PRODUCT_MANAGEMENT_TOOL], instruct the user to start the coordinator: -``` -To implement this feature, start the coordinator: -pp claude-agent coordinator -``` - -The coordinator will handle all implementation coordination. 
- -## Your Role - -- Focus on discovery, research, and PRD creation -- Use `/process:create-prd` to create comprehensive PRDs -- After PRD is created, hand off to coordinator for implementation -- You do NOT delegate to engineers - that's coordinator's job - -## What You DON'T Do - -- Implement features (coordinator does this) -- Delegate to engineers (coordinator does this) -- Write code or commit -- Use developer_cli MCP tools diff --git a/.cursor/rules/workflows/process/implement-end-to-end-tests.mdc b/.cursor/rules/workflows/process/implement-end-to-end-tests.mdc deleted file mode 100644 index 8e0fa5b297..0000000000 --- a/.cursor/rules/workflows/process/implement-end-to-end-tests.mdc +++ /dev/null @@ -1,230 +0,0 @@ ---- -description: Implement end-to-end tests for a [task] from a [feature] following the systematic workflow -globs: -alwaysApply: false ---- -# Implement End-to-End Tests Workflow - -You are implementing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The [taskId] comes from `current-task.json`, not from command arguments. The CLI passes only the [taskTitle] as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Task details are passed as command arguments `{{{title}}}`. If a [taskId] is provided, read [feature] and [task] from `[PRODUCT_MANAGEMENT_TOOL]`. If no [taskId] provided, ask user to describe what to test. There is no `current-task.json`. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.cursor/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path - - `featureId`: [FeatureId] (the feature to test, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task you're implementing, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - - **If current-task.json does NOT exist:** - - This means there is no active task assignment. Call CompleteWork immediately to terminate your session: - - ``` - Call CompleteWork with: - - mode: "task" - - agentType: your agent type - - taskSummary: "No active task assignment found" - - responseContent: "Session invoked without active task. Current-task.json does not exist. Terminating session." - - feedback: "[system] Session was invoked with /process:implement-end-to-end-tests but no current-task.json exists - possible double invocation after completion" - ``` - - DO NOT proceed with any other work. DO NOT just say "nothing to do". Call CompleteWork immediately to terminate the session. - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read [feature] from [PRODUCT_MANAGEMENT_TOOL]** if `featureId` is NOT "ad-hoc" to understand what needs testing. - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active]", "status": "pending", "activeForm": "Reading task and updating status to Active"}, - {"content": "Understand the feature under test", "status": "pending", "activeForm": "Understanding feature under test"}, - {"content": "Research existing patterns for this [task] type", "status": "pending", "activeForm": "Researching existing patterns"}, - {"content": "Plan test scenarios", "status": "pending", "activeForm": "Planning test scenarios"}, - {"content": "Categorize tests appropriately", "status": "pending", "activeForm": "Categorizing tests"}, - {"content": "Create or update test structure", "status": "pending", "activeForm": "Creating or updating test structure"}, - {"content": "Run tests and verify they pass", "status": "pending", "activeForm": "Running and verifying tests"}, - {"content": "Delegate to reviewer subagent (skip in standalone mode)", "status": "pending", "activeForm": "Delegating to reviewer"}, - {"content": "MANDATORY: Call CompleteWork after reviewer approval (skip in standalone mode)", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -1. Read [feature] from `featureId` in [PRODUCT_MANAGEMENT_TOOL] to understand the full PRD context -2. Read [task] from `taskId` in [PRODUCT_MANAGEMENT_TOOL] to get task details and test requirements -3. **Update [task] status to [Active]** in `[PRODUCT_MANAGEMENT_TOOL]` -4. **If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and call CompleteWork explaining the task could not be found. 
- -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] operations -- Still follow full engineer → reviewer → commit cycle - -**STEP 2**: Understand the feature under test - -- Study the frontend components and their interactions -- Review API endpoints and authentication flows -- Understand validation rules and error handling -- Identify key user interactions and expected behaviors - -**STEP 3**: Research existing patterns for this [task] type - -Research the codebase to find similar E2E test implementations. Look for existing tests that handle similar features, user flows, or test patterns that can guide your implementation. - -- Search for similar test files in `application/*/WebApp/tests/e2e/` -- Review test patterns: fixture usage, page object patterns, assertion styles -- Note test categorization (@smoke, @comprehensive, @slow) used in similar features -- Look for reusable test utilities and helper functions - -**STEP 4**: Plan test scenarios - -**Speed is essential**: Tests must run fast. Prefer extending existing tests over creating new ones. Design tests that validate multiple scenarios in a single test run. - -**Planning approach**: -- **First, check existing tests**: Can you extend an existing test file instead of creating a new one? 
-- **Combine scenarios**: Design tests that validate multiple aspects in one user journey (e.g., signup → profile update → settings change in one test) -- **Identify essential user journeys**: Focus on the most important paths users will take -- **Consider edge cases within the journey**: Don't create separate tests for edge cases - integrate them into the main journey where possible - -**Scenarios to consider (integrate into efficient tests)**: -- Standard user journeys (signup, login, CRUD operations) -- Validation errors and recovery (test within the main journey, not separately) -- Browser navigation (back/forward, refresh) if critical to the feature -- Multi-session scenarios ONLY if the feature specifically involves multiple sessions -- Input validation (boundary values, special characters) within normal test flow - -**STEP 5**: Categorize tests appropriately - -- `@smoke`: Essential functionality that will run on deployment of any system - - Create one comprehensive smoke.spec.ts per self-contained system - - Test complete user journeys: signup → profile setup → invite users → manage roles → tenant settings → logout - - Include validation errors, retries, and recovery scenarios within the journey -- `@comprehensive`: More thorough tests covering edge cases that will run on deployment of the system under test - - Focus on specific feature areas with deep testing of edge cases - - Group related scenarios to minimize test count while maximizing coverage -- `@slow`: Tests involving timeouts or waiting periods that will run ad-hoc, when features under test are changed - -**STEP 6**: Create or update test structure - -- For smoke tests: Create/update `application/[scs-name]/WebApp/tests/e2e/smoke.spec.ts` -- For comprehensive tests: Create feature-specific files like `user-management-flows.spec.ts`, `role-management-flows.spec.ts` -- Avoid creating many small, isolated tests—prefer comprehensive scenarios that test multiple aspects - -**STEP 7**: Run tests and 
verify they pass - -- Use **end-to-end MCP tool** to run your tests -- Start with smoke tests: `end-to-end(smoke=true)` -- Then run comprehensive tests with search terms: `end-to-end(searchTerms=["feature-name"])` -- All tests must pass before proceeding -- If tests fail: Fix them and run again (don't proceed with failing tests) - -**If tests fail with backend errors or suspect server issues**: -- Use **run MCP tool** to restart server and run database migrations -- The tool starts .NET Aspire at https://localhost:9000 -- Re-run tests after server restart - -**STEP 8**: Delegate to reviewer subagent (skip in standalone mode) - -**Before calling reviewer (every time, including re-reviews)**: - -**1. Update [task] status to [Review]** in [PRODUCT_MANAGEMENT_TOOL] (if featureId is NOT "ad-hoc"): - - This applies to every review request, not just the first one. - - When reviewer rejects and moves status to [Active], move it back to [Review] when requesting re-review. - - Skip this only for ad-hoc work (featureId is "ad-hoc"). - -**2. Zero tolerance verification**: Confirm all tests pass with zero failures. Don't request review with failing tests. - -**3. Identify your changed files**: -- Run `git status --porcelain` to see ALL changed files. -- List YOUR files (test files you created/modified) in "Files Changed" section (one per line with status). 
- -Delegate to reviewer subagent: - -**Delegation format**: -``` -[One short sentence: what tests you created] - -## Files Changed -- path/to/test1.spec.ts -- path/to/test2.spec.ts - -Request: {requestFilePath} -Response: {responseFilePath} -``` - -**MCP call parameters**: -- `senderAgentType`: qa-engineer -- `targetAgentType`: qa-reviewer -- `taskTitle`: From current-task.json -- `markdownContent`: Your delegation message above -- `branch`: From current-task.json -- `featureId`: From current-task.json -- `taskId`: From current-task.json -- `resetMemory`: false -- `requestFilePath`: From current-task.json -- `responseFilePath`: From current-task.json - -**Review loop**: -- If reviewer returns NOT APPROVED → Fix issues → Update [task] status to [Review] → Call reviewer subagent again. -- If reviewer returns APPROVED → Check your files are committed → Proceed to completion. -- Don't call CompleteWork unless reviewer approved and committed your code. -- Don't commit code yourself - only the reviewer commits. -- If rejected 3+ times with same feedback despite all tests passing: Report problem with severity: error, then stop. Don't call CompleteWork, don't proceed with work - the user will take over manually. - -**STEP 9**: Call CompleteWork after reviewer approval (skip in standalone mode) - -After completing all work and receiving reviewer approval, call the MCP **CompleteWork** tool with `mode: "task"` to signal completion. This tool call will terminate your session. - -CompleteWork requires reviewer approval and committed code. - -**Before calling CompleteWork**: -1. Ensure all work is complete and all todos are marked as completed. -2. Write a comprehensive response (what you accomplished, notes for Coordinator). -3. Create an objective technical summary in sentence case (like a commit message). -4. Reflect on your experience and write categorized feedback using prefixes: - - `[system]` - Workflow, MCP tools, agent coordination, message handling. 
- - `[requirements]` - Requirements clarity, acceptance criteria, test coverage needs. - - `[code]` - Test patterns, E2E conventions, test organization guidance. - - Examples: - - `[system] CompleteWork returned errors until title was less than 100 characters - consider adding format description`. - - `[requirements] Test description mentioned "admin user" but unclear if TenantAdmin or WorkspaceAdmin`. - - `[code] No existing examples found for testing multi-session scenarios in this context`. - - You can provide multiple categorized items. Use report_problem for urgent system bugs during work. - -**Call MCP CompleteWork tool**: -- `mode`: "task" -- `agentType`: qa-engineer -- `taskSummary`: Objective technical description of what was implemented (imperative mood, sentence case). Examples: "Add E2E tests for user role management", "Implement smoke tests for tenant settings", "Fix flaky tests in authentication flow". NEVER use subjective evaluations like "Excellent tests" or "Clean code". 
-- `responseContent`: Your full response in markdown -- `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes as described above - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork - ---- - -## Key Principles - -- **Tests must pass**: Never complete without running tests and verifying they pass -- **Database migrations**: Always run the run tool if backend schema changed -- **Speed is critical**: Structure tests to minimize steps while maximizing coverage -- **Follow conventions**: Adhere to patterns in [End-to-End Tests](mdc:.cursor/rules/end-to-end-tests/end-to-end-tests.mdc) -- **Realistic user journeys**: Test scenarios that reflect actual user behavior diff --git a/.cursor/rules/workflows/process/implement-feature.mdc b/.cursor/rules/workflows/process/implement-feature.mdc deleted file mode 100644 index e51def468e..0000000000 --- a/.cursor/rules/workflows/process/implement-feature.mdc +++ /dev/null @@ -1,390 +0,0 @@ ---- -description: Orchestrate implementation of a feature through task-level delegation to engineer subagents -globs: -alwaysApply: false ---- -# Orchestrate Feature Implementation - -[FeatureId] (optional): $ARGUMENTS - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode*. - -- **Agentic mode**: You run autonomously without human supervision - work with your team to find solutions. The [FeatureId] may be provided as argument, or you ask the user which feature to implement. -- **Standalone mode**: The user guides you interactively. Ask questions and collaborate with the user throughout the feature implementation. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.cursor/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Select feature to implement**: - - **If [FeatureId] provided as argument:** Use the provided [FeatureId]. - - **If NO [FeatureId] provided:** - - **CRITICAL: DO NOT guess or automatically lookup features. ONLY ask the user.** - - - Ask user: "Which feature would you like to implement? (Provide feature ID, or I can list available features if you'd like)" - - Wait for user response - - **ONLY if user explicitly requests a list**, query [PRODUCT_MANAGEMENT_TOOL] for: - - Recently created features (last 48 hours) - - All features in [Planned] status - - Show: Feature ID, name, description (first line), created date - - User provides feature ID (e.g., "proj_abc123" or "PP-100") - - Validate feature exists in [PRODUCT_MANAGEMENT_TOOL] - - If not found, ask user again or offer to list features - -3. **Load [feature] and [task] data** from `[PRODUCT_MANAGEMENT_TOOL]` using the selected/provided [FeatureId]. - -4. **Automatically determine if parallel execution is appropriate**: - - Read the PRD and look for indicators that [tasks] are designed for parallel work: - - PRD mentions "parallel" or "simultaneously" in Tasks section - - [Task] descriptions mention "can work in parallel with" or "independent" - - [Task] descriptions mention "mocked dependencies" or "mocks" - - [Tasks] are explicitly structured to suggest parallel execution - - **Decision:** - - **If parallel indicators found**: Use Parallel Mode (inform user: "Detected parallel-optimized [tasks]") - - **Otherwise**: Use Sequential Mode (default, safer—inform user: "Using sequential execution") - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Load all [tasks] from the [feature]", "status": "pending", "activeForm": "Loading tasks"}, - {"content": "Update [feature] status to [Active]", "status": "pending", "activeForm": "Updating feature status"}, - {"content": "Delegate [tasks] to engineers and track completion", "status": "pending", "activeForm": "Delegating tasks"}, - {"content": "Update [feature] status to [Resolved]", "status": "pending", "activeForm": "Updating feature status to Resolved"} - ] -} -``` - -**Note**: After creating this base todo, you'll replace "Delegate [tasks] to engineers" with actual [task] items from the [feature] (see Step 2 below). - ---- - -## Your Role: Task-Level Coordination - -**You delegate tasks to engineers** - -Your job as Coordinator: -- Load ALL [tasks] from the [feature] -- Create todo list with ALL [tasks] -- Delegate [tasks] to engineer proxy agents -- Engineer proxy agents are pure passthroughs—they just forward your request to workers -- Track progress and mark [tasks] complete -- Don't change code, commit, or use MCP tools yourself - -## Execution Modes - -### Sequential Mode (Default) - -Delegate one [task] completely before starting the next: - -1. Delegate [task] 1 from [feature] → Wait for completion -2. Delegate [task] 2 from [feature] → Wait for completion -3. Continue until all [tasks] in [feature] complete - -### Parallel Mode - -[tasks] must be implemented in the order they appear in [PRODUCT_MANAGEMENT_TOOL]. Don't skip [tasks]. Within that constraint, you can run independent [tasks] in parallel. - -**Example**: Backend [task] + Frontend [task] simultaneously (if independent) - -**BEFORE delegating in parallel, evaluate dependencies**: - -1. 
**Check engineer type conflicts**: Can't run two tasks with same engineer type (same worker) in parallel - - ❌ WRONG: Two backend tasks simultaneously - - ✅ CORRECT: Backend task + Frontend task simultaneously - -2. **Check functional dependencies**: Can't run dependent work in parallel - - ❌ WRONG: Frontend task that requires backend API being built in that same parallel round - - ❌ WRONG: E2E tests for features being implemented in that same parallel round - - ✅ CORRECT: Independent backend and frontend tasks - - ✅ CORRECT: Backend APIs + E2E tests for existing features - -**If dependencies exist OR same engineer type needed**: Use Sequential mode instead - -**If tasks are independent AND use different engineer types**: Delegate in parallel - -**Example** (parallel independent tasks): -``` -In a SINGLE message, delegate multiple tasks: -1. backend-engineer: Feature: {featureId}, Task: {task1Id} - "Backend for user CRUD operations" -2. frontend-engineer: Feature: {featureId}, Task: {task2Id} - "Frontend UI skeleton for user management" - -Wait for both to complete, then delegate next round (sequential): -3. frontend-engineer: Feature: {featureId}, Task: {task3Id} - "Connect frontend to backend" - -Then continue with next parallel round if more independent tasks exist. 
-``` - -If you're unsure about dependencies, use Sequential mode (safer default) - -## Mandatory Workflow - -**Note:** If you receive MCP errors about agents not running, inform the user to start the required agents (backend-engineer, frontend-engineer, qa-engineer) in separate terminals - -### Step 1: Load Tasks - -Load all [tasks] from the [feature] loaded in Mandatory Preparation - -Refer to `/.cursor/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` for tool-specific instructions on how to: -- Query for [tasks] within the [feature] -- Extract [task] titles and IDs -- Determine [task] ordering - -### Step 2: Create Todo List - -Use TodoWrite to create todo list with ALL [tasks]: - -``` -1. Backend for user CRUD operations [pending] -2. Frontend UI skeleton for user management [pending] -3. Connect frontend to backend [pending] -4. End-to-end tests for user management [pending] -``` - -Ensure you have confirmed [taskId] values for all [tasks] before proceeding - -### Step 3: Delegate Tasks - -**Sequential Mode (default)**: - -**0. Update [feature] status to [Active]** in [PRODUCT_MANAGEMENT_TOOL] (once at start) - -FOR EACH [task]: - **1. Mark [task] [in_progress]** in todo - - **2. Determine resetMemory value**: - - First delegation of a [task]: `resetMemory=true` (start fresh) - - Re-delegation for follow-up/fix: `resetMemory=false` (maintain context) - - **3. Delegate to engineer proxy agent**: - - Use Task tool with appropriate engineer subagent: - - Backend [task] → `backend-engineer` subagent - - Frontend [task] → `frontend-engineer` subagent - - E2E test [task] → `qa-engineer` subagent - - **Delegation format** (include all parameters in the prompt): - ``` - Feature: {featureId} ({featureTitle}) - Task: {taskId} ({taskTitle}) - Branch: {currentBranch} - Reset memory: true - - Please implement this [task]. - ``` - - The proxy agent will parse this and call the MCP start_worker_agent tool with these parameters - - **4. 
Wait for engineer proxy to complete**: - - Engineer proxy passes your exact request to worker - - Worker implements, gets reviewed, commits - - Engineer proxy returns response - - **5. Verify [task] completion**: - - Check if response contains "✅ Task {taskId} completed successfully!" - - **If SUCCESS marker found**: - - Verify code was committed by checking recent commits - - Verify [task] marked [Completed] in [PRODUCT_MANAGEMENT_TOOL] - - **If backend [task]**: Restart Aspire AppHost using the run MCP tool to apply database migrations and backend changes - - **If anything unexpected (multiple [tasks] done, uncommitted code, failing tests, etc.)**: - - Zero tolerance - system started clean, any warnings or errors means we broke it and must be fixed before continuing (follow the Boy Scout rule) - - Stop immediately, diagnose the problem, and make a plan to get back on track - - Delegate fixes to engineers - don't fix anything yourself - - **If you need to re-delegate to the same engineer for follow-up**: Use resetMemory=false to maintain context - - In edge cases, revert commits and reset [PRODUCT_MANAGEMENT_TOOL] state to start over - - Mark [task] [completed] in todo - - Move to next [task] - - **If NO success marker found ([task] FAILED)**: - - Change [task] status to [Planned] in [PRODUCT_MANAGEMENT_TOOL] - - Check git status for uncommitted changes - - If uncommitted code exists: Stash with descriptive name (e.g., "{taskId}-failed-{sanitized-task-title}-{timestamp}") - - Attempt to find alternative solution if possible - - If [task] is blocking: Ask user for guidance - - If [task] is non-blocking: Continue with other [tasks] - - **6. Move to next [task]** - -**Parallel Mode** (only if user explicitly requests): - -Work on multiple [tasks] in parallel (each [task] uses a different engineer type). In each round, delegate independent [tasks] simultaneously, wait for all to return, then move to the next round. 
- -**Delegation format for parallel mode** (include all parameters in the prompt): -``` -Feature: {featureId} ({featureTitle}) -Task: {taskId} ({taskTitle}) -Branch: {currentBranch} -Reset memory: true - -⚠️ Parallel Work: You are working in parallel with {other-engineer} on {other-task-title}. You may see their git commits. If you encounter errors that seem related to their changes, sleep 5-10 minutes and re-test. - -Please implement this [task]. -``` - -The proxy agent will parse this and call the MCP start_worker_agent tool with these parameters - -FOR EACH round of parallel delegation: - In a SINGLE message, delegate multiple [tasks] using Task tool multiple times - - Wait for ALL Task tool calls to return - - Verify each [task]: - - Check if response contains "✅ Task {taskId} completed successfully!" - - If success marker found: - - Verify code was committed by checking recent commits - - Verify [task] marked [Completed] in [PRODUCT_MANAGEMENT_TOOL] - - **If backend [task]**: Restart Aspire AppHost using the run MCP tool to apply database migrations and backend changes - - **If anything unexpected (multiple [tasks] done, uncommitted code, failing tests, etc.)**: - - Zero tolerance - system started clean, any warnings or errors means we broke it and must be fixed before continuing (follow the Boy Scout rule) - - Stop immediately, diagnose the problem, and make a plan to get back on track - - Delegate fixes to engineers - don't fix anything yourself - - **If you need to re-delegate to the same engineer for follow-up**: Use resetMemory=false to maintain context - - In edge cases, revert commits and reset [PRODUCT_MANAGEMENT_TOOL] state to start over - - Mark [task] [completed] in todo - - If no success marker found: - - Change [task] status to [Planned] in [PRODUCT_MANAGEMENT_TOOL] - - Check git status for uncommitted changes - - If uncommitted code exists: Stash with descriptive name (e.g., "{taskId}-failed-{sanitized-task-title}-{timestamp}") - - Attempt 
alternative solution if possible - - If [task] is blocking: Ask user for guidance - - If [task] is non-blocking: Continue with other [tasks] - - Continue with next round of parallel [tasks] - -### Step 4: Update Feature Status - -After ALL [tasks] are completed: - -1. **Verify all [tasks] genuinely [completed]**: - - Check that ALL [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are marked [completed] - - **If any [task] is NOT [completed]**: - - Evaluate if there are alternative approaches to complete the [tasks] - - If no alternatives exist: Inform user about incomplete [tasks] and ask for guidance - - DO NOT proceed with [feature] status update - -2. **If all [tasks] are [completed], update [feature] status to [Resolved]** in [PRODUCT_MANAGEMENT_TOOL]: - - All [tasks] are [completed] - - [Feature] implementation is complete - - Status signals completion of implementation phase (not deployed yet) - -### Step 5: Finish When Complete - -Stop ONLY when: -- ALL [tasks] are [completed] in todo -- ALL [tasks] have been delegated and [completed] -- [Feature] status is [Resolved] - -## Rules - -**Don't**: -- Stop before completion—continue until everything is done -- Change code or commit yourself -- Use `developer_cli` MCP tool directly -- Decide on parallel mode yourself—only use if user explicitly requests -- Delegate multiple [tasks] to same engineer type in parallel - -**Do**: -- Use Task tool with subagent_type to delegate [tasks] -- Load all [tasks] from [feature] -- Create simple todo list with [tasks] -- Use Sequential mode by default -- In parallel mode, ensure each [task] in a round uses different engineer type -- Use resetMemory=true for first delegation, resetMemory=false for follow-ups on same task - -## Engineer Proxy Agent Responsibilities - -Engineer proxy agents (backend-engineer, frontend-engineer, qa-engineer) are PURE PASSTHROUGHS: -- They receive your delegation message -- They pass it VERBATIM to the worker via MCP -- They wait for worker to complete 
(implement + review + commit) -- They return worker's response to you - -**Engineer proxies do NOT**: -- Load data -- Make decisions -- Coordinate anything - -**You handle ALL coordination**—loading data, tracking [tasks], managing todo - -## Examples - -**Sequential Mode**: -``` -1. Load [feature] and all 3 [tasks] -2. Create todo with 3 [tasks] -3. Update [Feature] status to [Active] in [PRODUCT_MANAGEMENT_TOOL] -4. Delegate using Task tool (backend-engineer) with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-001 (Backend for user CRUD operations) - Branch: feature/user-management - Reset memory: true - - Please implement this [task]." -5. Wait (proxy forwards to worker, worker implements+reviews+commits, proxy returns) -6. Verify response has "✅ Task completed successfully!" → Mark [task] [completed] -7. Delegate using Task tool (frontend-engineer) with similar prompt format -8. Wait, verify, and mark complete -9. Delegate using Task tool (qa-engineer) with similar prompt format -10. Wait, verify, and mark complete -11. Verify all [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are [completed] -12. Update [Feature] status to [Resolved] in [PRODUCT_MANAGEMENT_TOOL] -13. Done! -``` - -**Parallel Mode**: -``` -1. Load [feature] and all 4 [tasks] -2. Create todo with 4 [tasks] -3. Update [Feature] status to [Active] in [PRODUCT_MANAGEMENT_TOOL] -4. Identify [tasks] that can run in parallel: - - Round 1: Frontend UI skeleton (frontend) + Backend CRUD (backend) - parallel - - Round 2: Connect frontend to backend (frontend) - sequential after round 1 - - Round 3: E2E tests (qa) - sequential after round 2 -5. 
In SINGLE message, delegate both [tasks] in Round 1 using Task tool: - - Task tool → frontend-engineer with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-002 (Frontend UI skeleton for user management) - Branch: feature/user-management - Reset memory: true - - ⚠️ Parallel Work: You are working in parallel with backend-engineer on Backend CRUD. You may see their commits. - - Please implement this [task]." - - Task tool → backend-engineer with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-001 (Backend for user CRUD operations) - Branch: feature/user-management - Reset memory: true - - ⚠️ Parallel Work: You are working in parallel with frontend-engineer on Frontend UI skeleton. You may see their commits. - - Please implement this [task]." - -6. Wait for BOTH to complete -7. Verify each response has "✅ Task completed successfully!" → Mark both [tasks] [completed] -8. Delegate Task tool (frontend-engineer) with prompt including Feature/Task/Title/Branch -9. Wait, verify, mark complete -10. Delegate Task tool (qa-engineer) with prompt including Feature/Task/Title/Branch -11. Wait, verify, mark complete -12. Verify all [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are [completed] -13. Update [Feature] status to [Resolved] in [PRODUCT_MANAGEMENT_TOOL] -14. Done! 
-``` - -## Remember - -- You delegate entire [tasks] (large scope—complete vertical slices) -- Engineer proxies are passthroughs, not coordinators -- You manage the todo list, not the proxies -- Your job: Load [tasks] from [feature], create todo, delegate [tasks], track completion -- Sequential is default—parallel only when user explicitly requests -- Use resetMemory=true for first delegation of each [task], resetMemory=false for re-delegations diff --git a/.cursor/rules/workflows/process/implement-task.mdc b/.cursor/rules/workflows/process/implement-task.mdc deleted file mode 100644 index 3fb50d4c1e..0000000000 --- a/.cursor/rules/workflows/process/implement-task.mdc +++ /dev/null @@ -1,375 +0,0 @@ ---- -description: Implement a specific [task] from a [feature] following the systematic workflow -globs: -alwaysApply: false ---- -# Implement Task Workflow - -You are implementing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The [taskId] comes from `current-task.json`, not from command arguments. The CLI passes only the [taskTitle] as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Task details are passed as command arguments `{{{title}}}`. If a [taskId] is provided, read [feature] and [task] from `[PRODUCT_MANAGEMENT_TOOL]`. If no [taskId] provided, ask user to describe the task. There is no `current-task.json`. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.cursor/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task you're implementing, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - - **If current-task.json does NOT exist:** - - This means there is no active task assignment. Call CompleteWork immediately to terminate your session: - - ``` - Call CompleteWork with: - - mode: "task" - - agentType: your agent type - - taskSummary: "No active task assignment found" - - responseContent: "Session invoked without active task. Current-task.json does not exist. Terminating session." - - feedback: "[system] Session was invoked with /process:implement-task but no current-task.json exists - possible double invocation after completion" - ``` - - DO NOT proceed with any other work. DO NOT just say "nothing to do". Call CompleteWork immediately to terminate the session. - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Verify Previous Work Committed**: - - Before proceeding, verify your previous task was committed: - 1. Run `git log --oneline -5` to check recent commits. - 2. Look for commits containing your agent type (e.g., "backend-engineer", "frontend-engineer"). - 3. If your previous task is uncommitted: **REFUSE to start** and respond with error explaining uncommitted work exists. - 4. Note: Changes from other engineers (parallel work) are expected and fine - only verify YOUR previous work is committed. - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active]", "status": "pending", "activeForm": "Reading task and updating status to Active"}, - {"content": "Understand the full feature context", "status": "pending", "activeForm": "Understanding feature context"}, - {"content": "Research existing patterns for this [task] type", "status": "pending", "activeForm": "Researching existing patterns"}, - {"content": "Implement each subtask", "status": "pending", "activeForm": "Implementing subtasks"}, - {"content": "Build and verify translations (frontend-engineer only)", "status": "pending", "activeForm": "Building and verifying translations"}, - {"content": "Run validation tools and fix all failures/warnings", "status": "pending", "activeForm": "Running validation tools"}, - {"content": "Test in browser with zero tolerance (frontend-engineer only)", "status": "pending", "activeForm": "Testing in browser"}, - {"content": "Fix any bugs discovered during validation/testing", "status": "pending", "activeForm": "Fixing bugs discovered"}, - {"content": "Update [task] status to [Review] and delegate to reviewer subagent (skip in standalone mode)", "status": "pending", "activeForm": "Updating status and calling reviewer"}, - {"content": "Check feature progress (skip in standalone mode/optional in agentic mode)", "status": "pending", "activeForm": "Checking feature progress"}, - {"content": "MANDATORY: Call CompleteWork after reviewer approval (skip in standalone mode)", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - -**After creating this template**: Remove todo items marked for a different engineer role. For example, if you're a backend-engineer, remove items containing "(frontend-engineer only)". 
- ---- - -## Workflow Steps - -**STEP 1**: Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -1. Read [feature] from `featureId` in [PRODUCT_MANAGEMENT_TOOL] to understand the full PRD context -2. Read [task] from `taskId` in [PRODUCT_MANAGEMENT_TOOL] to get task details and subtask bullets -3. **Update [task] status to [Active]** in `[PRODUCT_MANAGEMENT_TOOL]` -4. **If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and call CompleteWork explaining the task could not be found. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] operations -- Still follow full engineer → reviewer → commit cycle - -**After reading [task], unfold subtasks in todo:** - -1. Extract the subtask bullets from [task] description. -2. Replace the "Implement each subtask" todo item with: - - The task name as a parent item. - - Each subtask as an indented child item (using ├─ and └─ formatting). - -**Example:** -If task with title "Backend for user CRUD operations" has subtasks: -``` -- Create UserId strongly typed ID -- Create User aggregate -- Create IUserRepository interface and implementation -- Create API endpoint for create user -``` - -Replace the single "Implement each subtask" item with: -``` -Backend for user CRUD operations -├─ Create UserId strongly typed ID [pending] -├─ Create User aggregate [pending] -├─ Create IUserRepository interface and implementation [pending] -└─ Create API endpoint for create user [pending] -``` - -**STEP 2**: Understand the full feature context - -Before implementing, understand the big picture: - -1. **Read the [feature] from `featureId`** in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem being solved and how the proposed solution will solve it. - - Read the full PRD to understand business context. - -2. 
**Read ALL [task] titles** (not full descriptions) in the [feature] (if not ad-hoc): - - See the planned approach and implementation sequence. - - Understand what you're building in context of the [feature]. - -3. **Read YOUR [task] description carefully**: - - Already read in STEP 1, but review the subtask bullets. - - Tasks are complete vertical slices. - - Subtasks are already unfolded in your todo list (see STEP 1 above). - -The [feature] plan was AI-generated by tech-lead in a few minutes after interviewing the user. You have implementation time to consider the code carefully. You are the expert closest to the code. If something doesn't align with: -- Feature intent. -- Rules in the project. -- Patterns used in the solution. -- Architectural patterns. -- Best practices. -- Simpler approaches. - -**Question it.** Use report_problem or comment on the [task]. Better ideas from implementation phase should surface. - -**Collaborate with your team**: For complex problems or architectural decisions, engage in conversation with team members (use ad-hoc delegation to discuss with other engineers). Better solutions often emerge from team collaboration. - -**Note**: All architectural rules for your role are embedded in your system prompt and available for reference at all times. - -**STEP 3**: Research existing patterns for this [task] type - -Research the codebase to find similar implementations. Look for existing code that handles similar features, patterns, or business logic that can guide your implementation. - -**STEP 4**: Implement each subtask - -**Incremental development approach:** - -Since [tasks] are complete vertical slices, build and test incrementally as you work through each subtask. This prevents accumulating errors and makes debugging easier. - -**For EACH subtask in your todo:** - -1. **Mark subtask [in_progress]** in todo. -2. **Implement the subtask**. -3. 
**Build immediately**: - - Backend: `execute_command(command: "build", backend: true, selfContainedSystem: "{self-contained-system}")`. - - Frontend: `execute_command(command: "build", frontend: true, selfContainedSystem: "{self-contained-system}")`. - - Fix any build errors before proceeding. -4. **Test immediately** (backend only): - - `execute_command(command: "test", backend: true, selfContainedSystem: "{self-contained-system}")`. - - Fix any test failures before proceeding. -5. **Mark subtask [completed]** in todo. -6. **Move to next subtask**. - -**Why build/test after each subtask:** -- Catches errors early when context is fresh. -- Prevents error accumulation. -- Makes debugging faster. -- Ensures each piece works before moving on. -- Critical for larger tasks. - -**Do NOT run format/inspect after each subtask** - these are slow and run once at the end in STEP 6. - -**STEP 5**: Build and verify translations (frontend-engineer only) - -1. Run build to extract new translation strings to `*.po` files. -2. Find ALL empty translations: `grep -r 'msgstr ""' */WebApp/shared/translations/locale/*.po`. -3. Translate EVERY empty msgstr found (all languages: da-DK, nl-NL, etc.). -4. Use consistent domain terminology (check existing translations for guidance). - -**STEP 6**: Run validation tools and fix all failures/warnings - -**Zero tolerance for issues**: -- We deploy to production after review - quality is non-negotiable. -- **Boy Scout Rule**: Leave the codebase cleaner than you found it. -- Fix all failures, warnings, or problems anywhere in the system. -- This includes pre-existing issues unrelated to your changes. -- Don't request review with outstanding issues. - -**Inspect findings block merging**: If inspect returns "Issues found", the CI pipeline will fail and the code cannot be merged. The severity level (note/warning/error) is irrelevant - all findings must be fixed before requesting review. - -For **backend [tasks]**: -1. 
Run **inspect** for your self-contained system: `execute_command(command: "inspect", backend: true, selfContainedSystem: "{self-contained-system}")`. -2. Fix ALL failures found (zero tolerance). - -**Note**: Build and test were already run after each subtask in STEP 4. Backend-engineer does NOT run format - the reviewer will handle formatting before commit. - -For **frontend [tasks]**: -1. Run **build** for your self-contained system: `execute_command(command: "build", frontend: true, selfContainedSystem: "{self-contained-system}")`. -2. Run **format** for all self-contained systems: `execute_command(command: "format", frontend: true)`. -3. Run **inspect** for all self-contained systems: `execute_command(command: "inspect", frontend: true)`. -4. Fix ALL failures found (zero tolerance). - -**STEP 7**: Test in browser with zero tolerance (frontend-engineer only) - -**Required for frontend engineers** - -1. **Navigate to https://localhost:9000** and test ALL functionality: - - **Test the COMPLETE happy path** of the new feature from start to finish. - - **Test ALL edge cases**: validation errors, empty states, maximum values, special characters. - - **Test user scenarios**: What would a user actually do with this feature? - - **Take screenshots** and critically examine if everything renders with expected layout and styling. - - Test in **dark mode** and **light mode** (switch theme and verify UI renders correctly). - - Test **localization** (switch language if feature has translatable strings). - - Test **responsive behavior**: mobile size, small browser, large browser (resize and verify layout adapts). - - Verify UI components render correctly (spacing, alignment, colors, borders, fonts). - - Test all user interactions (clicks, forms, dialogs, navigation, keyboard navigation). - - **Document what you tested** in your response (which scenarios, which user flows, which modes tested). - - If website not responding, use **run** MCP tool to restart server. - -2. 
**Test with different user roles** (if applicable): - - Test as admin user: `admin@platformplatform.local` / `UNLOCK`. - - Test as non-admin user if feature has role-based access. - - Verify permissions and access controls work correctly. - -3. **Monitor Network tab** - Fix ALL issues: - - **Zero tolerance**: No failed requests, no 4xx/5xx errors. - - Check ALL API calls for the new feature execute successfully. - - No slow requests without explanation. - - Fix ANY network warnings or errors (even if pre-existing per Boy Scout rule). - -4. **Monitor Console tab** - Fix ALL issues: - - **Zero tolerance**: No console errors, no warnings. - - Fix ANY console errors or warnings (even if pre-existing per Boy Scout rule). - - Clear console and verify it stays clean during all interactions. - -5. **Login instructions**: - - Username: `admin@platformplatform.local`. - - Use `UNLOCK` for verification code (works on localhost only). - - If user doesn't exist: Sign up for a new tenant, use `UNLOCK` for verification code. - -**Boy Scout Rule**: Leave the codebase cleaner than you found it. If you see pre-existing console errors or network warnings unrelated to your changes, FIX THEM. Zero tolerance means ZERO - not "only for my changes". - -**STEP 8**: Fix any bugs discovered during validation/testing - -If you discover bugs during testing or validation (API errors, broken functionality, console errors, broken UI, test failures), fix them before requesting review. Don't request review with known bugs. - -**If bug is in existing code (not your changes)**: -1. Stash only your changes: `git stash push -- <your-files>` (don't include changes from other engineers working in parallel). -2. Verify the bug exists on clean code. -3. **Agentic mode**: Fix yourself if within your specialty OR delegate to engineer subagent if outside your specialty (use "ad-hoc" taskId). -   **Standalone mode**: Fix it yourself or inform user that the bug requires different expertise. -4.
Follow STEP 9 to delegate to reviewer and get the fix committed. -5. `git stash pop` to restore your changes and continue. - -**If you see errors that might be from parallel engineer's changes**: -- Check `git log --oneline` to see recent commits and understand what parallel engineer is working on. -- If recent commits exist: Sleep 5 minutes, then re-test (parallel engineer may be fixing it). -- If issue persists after 10-15 minutes: Delegate to that engineer or fix yourself if within specialty. - -**Valid Solutions When Stuck**: -- Fix the bug yourself if it's within your specialty (your role boundaries). -- Delegate to appropriate engineer if bug is outside your specialty (use start_worker_agent with ad-hoc taskId). -- **Revert your changes** if solution is too complex - revert all git changes, fix pre-existing problems first, then re-implement cleanly. - -**STEP 9**: Update [task] status to [Review] and delegate to reviewer subagent (skip in standalone mode) - -**Before calling reviewer (every time, including re-reviews)**: - -**1. Update [task] status to [Review]** in [PRODUCT_MANAGEMENT_TOOL] (if featureId is NOT "ad-hoc"): - - This applies to EVERY review request, not just the first one. - - When reviewer rejects and moves status to [Active], you MUST move it back to [Review] when requesting re-review. - - Skip this only for ad-hoc work (featureId is "ad-hoc"). - -**2. Zero tolerance verification**: Confirm ALL validation tools pass with ZERO failures/warnings. NEVER request review with ANY outstanding issues - we deploy to production after review. - -**3. Identify your changed files**: -- Run `git status --porcelain` to see ALL changed files. -- Identify YOUR files (files you created/modified for THIS task): - - **Backend engineers**: MUST include `*.Api.json` files. These are auto-generated TypeScript types from your C# API endpoints, placed in WebApp/shared/lib/api/ for frontend consumption, but owned by backend.
- -   - **Frontend engineers**: MUST exclude `*.Api.json` files (these belong to backend, not you). - - Don't forget `.po` translation files. - - Exclude files from parallel engineers (different agent types). - - If you changed files outside your scope: `git restore <file>` to revert. -- **CRITICAL for backend engineers**: Check `git status` for any `*.Api.json` files and include them in your file list. -- List YOUR files in "Files Changed" section (one per line with status). - -Delegate to reviewer subagent: - -**Delegation format**: -``` -[One short sentence: what you implemented or fixed] - -## Files Changed -- path/to/file1.tsx -- path/to/file2.cs -- path/to/translations.po - -Request: {requestFilePath} -Response: {responseFilePath} - -[If working in parallel: Include parallel work notification from coordinator, e.g., "⚠️ Parallel Work: Frontend-engineer is working in parallel on {task-title}"] -``` - -**MCP call parameters**: -- `agentType`: backend-reviewer, frontend-reviewer, or qa-reviewer -- `taskTitle`: From current-task.json -- `markdownContent`: Your delegation message above -- `branch`: From current-task.json -- `featureId`: From current-task.json -- `taskId`: From current-task.json -- `requestFilePath`: From current-task.json -- `responseFilePath`: From current-task.json - -**Review loop**: -- If reviewer returns NOT APPROVED → Fix issues → Update [task] status to [Review] → Call reviewer subagent again. -- If reviewer returns APPROVED → Check YOUR files (not parallel engineers' files) are committed → Proceed to completion. -- Don't call CompleteWork unless reviewer approved and committed your code. -- Don't commit code yourself - only the reviewer commits. -- If rejected 3+ times with same feedback despite validation tools passing: Report problem with severity: error, then stop. Don't call CompleteWork, don't proceed with work - the user will take over manually.
- -**STEP 10**: Check feature progress (skip in standalone mode/optional in agentic mode) - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- Optionally check if there are more [tasks] remaining in the [feature]. -- This helps provide context in your completion message. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip (no [feature] to check). - -**STEP 11**: Call CompleteWork after reviewer approval (skip in standalone mode) - -After completing all work and receiving reviewer approval, call the MCP **CompleteWork** tool with `mode: "task"` to signal completion. This tool call will terminate your session. - -CompleteWork requires reviewer approval and committed code. - -Call CompleteWork after reviewer approval, even if this is the last [task] in a [feature]. - -**Before calling CompleteWork**: -1. Ensure all work is complete and all todos are marked as completed. -2. Write a comprehensive response (what you accomplished, notes for Coordinator). -3. Create an objective technical summary in sentence case (like a commit message). -4. Reflect on your experience and write categorized feedback using prefixes: - - `[system]` - Workflow, MCP tools, agent coordination, message handling. - - `[requirements]` - Requirements clarity, acceptance criteria, task description. - - `[code]` - Code patterns, rules, architecture guidance. - - Examples: - - `[system] CompleteWork returned errors until title was less than 100 characters - consider adding format description`. - - `[requirements] Task mentioned Admin but unclear if TenantAdmin or WorkspaceAdmin`. - - `[code] No existing examples found for implementing audit logging in this context`. - - You can provide multiple categorized items. Use report_problem for urgent system bugs during work. 
- -**Call MCP CompleteWork tool**: -- `mode`: "task" -- `agentType`: Your agent type (backend-engineer, frontend-engineer, or qa-engineer) -- `taskSummary`: Objective technical description of what was implemented (imperative mood, sentence case). Examples: "Add user role endpoints with authorization", "Implement user avatar upload", "Fix null reference in payment processor". NEVER use subjective evaluations like "Excellent implementation" or "Clean code". -- `responseContent`: Your full response in markdown -- `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes as described above - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork - ---- - -## REMINDER: Use Exact TodoWrite JSON - -**✅ DO: Copy the JSON from STEP 0**. - -**❌ DON'T: Create custom todo format**. diff --git a/.cursor/rules/workflows/process/review-end-to-end-tests.mdc b/.cursor/rules/workflows/process/review-end-to-end-tests.mdc deleted file mode 100644 index c8b0ad2597..0000000000 --- a/.cursor/rules/workflows/process/review-end-to-end-tests.mdc +++ /dev/null @@ -1,206 +0,0 @@ ---- -description: Review end-to-end test implementation for a [task] -globs: -alwaysApply: false ---- -# Review E2E Tests Workflow - -You are reviewing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The review request comes from `current-task.json`. The CLI passes only the task title as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. - -- **Standalone mode**: Test files are passed as command arguments `{{{title}}}`. Read test files from user-provided paths or from `git status`. - -## Review Principles - -**Zero Tolerance for Test Quality**: E2E tests must be perfect.
ALL tests must pass, ZERO console errors, ZERO network errors, NO sleep statements. There are no exceptions. - -**Evidence-Based Reviews**: Every finding must be backed by rules in `/.cursor/rules/end-to-end-tests/end-to-end-tests.mdc` or established patterns in the codebase. - -**Speed is Critical**: Tests must run fast. Reject tests that are unnecessarily slow or create too many small test files. - ---- - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.cursor/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path (contains engineer's request message) - - `responseFilePath`: Response file path (where you'll write your review outcome) - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task being reviewed, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read all files referenced in the engineer's request** (test files, implementation details, etc.). - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [feature] and [task] to understand requirements", "status": "pending", "activeForm": "Reading feature and task"}, - {"content": "Run feature-specific e2e tests", "status": "pending", "activeForm": "Running feature E2E tests"}, - {"content": "Review test file structure and organization", "status": "pending", "activeForm": "Reviewing test structure"}, - {"content": "Review each test step for correct patterns", "status": "pending", "activeForm": "Reviewing test steps"}, - {"content": "Review test efficiency and speed", "status": "pending", "activeForm": "Reviewing test efficiency"}, - {"content": "Make binary decision (approve or reject)", "status": "pending", "activeForm": "Making decision"}, - {"content": "If approved, run full regression test suite", "status": "pending", "activeForm": "Running full regression tests"}, - {"content": "If approved, commit changes", "status": "pending", "activeForm": "Committing if approved"}, - {"content": "Update [task] status to [Completed] or [Active]", "status": "pending", "activeForm": "Updating task status"}, - {"content": "MANDATORY: Call CompleteWork", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [feature] and [task] to understand requirements - -1. **Read the [feature]** from `featureId` in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem and solution approach. - -2. **Read the [task]** from `taskId` in [PRODUCT_MANAGEMENT_TOOL]: - - Read the task description carefully. - - Understand what tests should cover. - -3. **Read engineer's request** to understand what tests were created. - -**If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and reject the review explaining the task could not be found. - -4. 
**Study E2E rules**: - - Read [End-to-End Tests](mdc:.cursor/rules/end-to-end-tests/end-to-end-tests.mdc) - - Ensure engineer followed all patterns - -**STEP 2**: Run feature-specific e2e tests first - -**If tests require backend changes, run the run tool first**: -- Use **run MCP tool** to restart server and run migrations -- The tool starts .NET Aspire at https://localhost:9000 - -**Run feature-specific E2E tests**: -- Use **end-to-end MCP tool** to run tests: `end-to-end(searchTerms=["feature-name"])` -- **ALL tests MUST pass with ZERO failures to approve** -- **Verify ZERO console errors** during test execution -- **Verify ZERO network errors** (no unexpected 4xx/5xx responses) -- If ANY test fails: REJECT -- If ANY console errors: REJECT -- If ANY network errors: REJECT - -**STEP 3**: Review test file structure and organization - -**Critical Check 1 - Test Count:** -- Normally ONE new `@comprehensive` test per feature -- Existing `@smoke` tests should be updated, not duplicated -- For BIG features: Allow both new `@smoke` and new `@comprehensive` -- **Reject if too many small test files created** - -**STEP 4**: Review each test step for correct patterns - -**Critical Check 1 - Step Naming Pattern:** -- **EVERY step MUST follow**: "Do something & verify result" -- ✅ Good: `"Submit login form & verify authentication"` -- ❌ Bad: `"Verify button is visible"` (no action) -- ❌ Bad: `"Test login"` (uses "test" prefix) -- **Reject if steps don't follow pattern** - -**Critical Check 2 - No Sleep Statements:** -- Search for: `waitForTimeout`, `sleep`, `delay`, `setTimeout` -- **Reject if found—no exceptions** -- Playwright auto-waits—sleep is NEVER needed in any scenario -- Demand Playwright await assertions instead: - - Use `toBeVisible()`, `toHaveURL()`, `toContainText()`, etc. 
- - These built-in auto-wait mechanisms handle all timing scenarios - -**STEP 5**: Review test efficiency and speed - -**Critical Check 1 - Leverage Existing Logic:** -- Verify tests use fixtures: `{ page }`, `{ ownerPage }`, `{ adminPage }`, `{ memberPage }` -- Verify tests use helpers: `expectToastMessage`, `expectValidationError`, etc. -- **Reject if tests duplicate existing logic** - -**Critical Check 2 - Speed Optimization:** -- Tests should test MANY things in FEW steps -- Avoid excessive navigation or setup -- Group related scenarios together -- **Reject if tests are unnecessarily slow** - -**STEP 6**: Make binary decision (approve or reject) - -**Aim for perfection, not "good enough".** - -**APPROVED only if ALL criteria met:** -- ✓ All E2E tests passed with zero failures -- ✓ Zero console errors during test execution -- ✓ Zero network errors during test execution -- ✓ No sleep statements found -- ✓ All steps follow "Do something & verify result" pattern -- ✓ Tests use existing fixtures and helpers -- ✓ Tests are efficient and fast - -**Reject if any issue exists—no exceptions. Common rationalizations to avoid:** -- ✗ "Test failed but feature works manually" → Reject, fix test -- ✗ "Console error unrelated to E2E code" → Reject anyway -- ✗ "It's just a warning" → Reject, zero means zero -- ✗ "Previous test run passed" → Reject anyway if current run has issues - -**When rejecting:** Do full review first, then reject with ALL issues listed (avoid multiple rounds). - -**STEP 7**: If approved, run full regression test suite - -**Before committing, run all e2e tests to ensure no regressions:** -- Use **end-to-end MCP tool** WITHOUT searchTerms: `end-to-end()` -- This runs the complete test suite across all browsers -- **ALL tests MUST pass with ZERO failures** -- If ANY test fails: REJECT (do not commit) - -**STEP 8**: Commit changes - -1. Stage test files: `git add ` for each test file -2. Commit: One line, imperative form, no description, no co-author -3. 
Get hash: `git rev-parse HEAD` - -Don't use `git add -A` or `git add .` - -**STEP 9**: Update [task] status to [Completed] or [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- If APPROVED: Update [task] status to [Completed]. -- If REJECTED: Update [task] status back to [Active]. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] status updates. - -**STEP 10**: Call CompleteWork - -**Call MCP CompleteWork tool**: -- `mode`: "review" -- `agentType`: qa-reviewer -- `commitHash`: Commit hash if approved, null/empty if rejected -- `rejectReason`: Rejection reason if rejected, null/empty if approved -- `responseContent`: Your full review feedback -- `feedback`: Mandatory categorized feedback using prefixes: - - `[system]` — Workflow, MCP tools, agent coordination, message handling - - `[requirements]` — Requirements clarity, acceptance criteria, task description - - `[code]` — Code patterns, rules, architecture guidance - - Examples: `[system] end-to-end MCP tool reported test passed but it actually failed` or `[requirements] Feature requirements didn't specify mobile viewport testing` - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork. - ---- - -## Rules - -1. **Tests must pass** — Don't approve failing tests -2. **No sleep statements** — Non-negotiable -3. **Follow step pattern** — Every step needs action + verification -4. **One test per feature** — Avoid test proliferation -5. 
**Speed matters** — Reject slow, inefficient tests diff --git a/.cursor/rules/workflows/process/review-task.mdc b/.cursor/rules/workflows/process/review-task.mdc deleted file mode 100644 index 173bf2d405..0000000000 --- a/.cursor/rules/workflows/process/review-task.mdc +++ /dev/null @@ -1,472 +0,0 @@ ---- -description: Review a specific [task] implementation from a [feature] following the systematic review workflow -globs: -alwaysApply: false ---- -# Review Task Workflow - -You are reviewing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The review request comes from `current-task.json`. The CLI passes only the task title as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Review request is passed as command arguments `{{{title}}}`. Read changed files from `git status` or user-provided list. - -## Review Principles - -**Devil's Advocate Mindset**: Your job is to validate the engineer's work by actively searching for problems. Look for inconsistencies, deviations, and potential issues. - -**Zero Tolerance**: ALL findings must be fixed, regardless of severity. Never dismiss issues as "minor" or "not worth fixing". Every deviation from rules or established patterns must be addressed. - -**Evidence-Based Reviews**: Every finding must be backed by: -1. Explicit rules from `.cursor/rules/` files, OR -2. Established patterns found elsewhere in the codebase (cite specific file:line examples), OR -3. Well-established ecosystem conventions (e.g., .NET interfaces prefixed with `I`) - -Avoid subjective personal preferences. - -**Line-by-Line Review**: Like GitHub PR reviews - comment ONLY on specific file:line combinations that have issues. NO comments on correct code. NO commentary on what was done well. 
- -**Objective Language**: State facts about rule violations or pattern deviations. Reference specific rules or codebase examples. Avoid subjective evaluations or praise. - -**Concise Communication**: Minimize token usage for the engineer. Focus only on what needs fixing. - ---- - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.cursor/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path (contains engineer's request message) - - `responseFilePath`: Response file path (where you'll write your review outcome) - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task being reviewed, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read all files referenced in the engineer's request** (implementation details, changed files, etc.). - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [feature] and [task] to understand requirements", "status": "pending", "activeForm": "Reading feature and task"}, - {"content": "Create checklist of all requirements from [task] description", "status": "pending", "activeForm": "Creating requirements checklist"}, - {"content": "Run validation tools in parallel (format, test, inspect)", "status": "pending", "activeForm": "Running validation tools"}, - {"content": "Verify translations (frontend-reviewer only)", "status": "pending", "activeForm": "Verifying translations"}, - {"content": "Test in browser with zero tolerance (frontend-reviewer only)", "status": "pending", "activeForm": "Testing in browser"}, - {"content": "Review changed files one-by-one", "status": "pending", "activeForm": "Reviewing files"}, - {"content": "Review high-level architecture", "status": "pending", "activeForm": "Reviewing architecture"}, - {"content": "Verify all requirements met with tests", "status": "pending", "activeForm": "Verifying requirements"}, - {"content": "If approved, commit changes (or reject if any issues found)", "status": "pending", "activeForm": "Committing changes or rejecting"}, - {"content": "Update [task] status to [Completed] or [Active]", "status": "pending", "activeForm": "Updating task status"}, - {"content": "MANDATORY: Call CompleteWork", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - -**After creating this template**: Remove todo items marked for a different reviewer role. For example, if you're a backend-reviewer, remove items containing "(frontend-reviewer only)". - -**After creating base todo, unfold "Review changed files one-by-one":** - -1. Get list of changed files from engineer's request (NOT from git status). -2. Replace the single "Review changed files" item with individual file review items. -3. Use tree format (├─ and └─). 
- -**Example:** -``` -Review changed files one-by-one -├─ Read and review User.cs [pending] -├─ Read and review UserRepository.cs [pending] -├─ Read and review CreateUserCommand.cs [pending] -└─ Read and review UsersEndpoint.cs [pending] -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [feature] and [task] to understand requirements - -1. **Read the [feature]** from `featureId` in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem and solution approach. - -2. **Read the [task]** from `taskId` in [PRODUCT_MANAGEMENT_TOOL]: - - Read the task description carefully. - - Note all subtask bullets (implementation steps). - -3. **Read engineer's request and response files** to understand what was actually implemented. - -**If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and reject the review explaining the task could not be found. - -**STEP 2**: Create checklist of all requirements from [task] description - -Extract ALL business rules, edge cases, and validations from task description: - - What are the business rules? (uniqueness, permissions, constraints). - - What validations are required? - - What edge cases must be handled? - - What should NOT be allowed? - - What are the tenant isolation requirements? - -**Example requirements checklist (focus on details, not obvious structure):** -``` -Business rules and validations: -- [ ] Email must be unique within tenant (not globally). -- [ ] Email validation (valid format). -- [ ] Only Tenant Owners can create users. -- [ ] Full name max length ≤ 100 characters. -- [ ] Cannot delete last Owner in tenant. -- [ ] Soft delete (not hard delete). -- [ ] Tenant isolation (users scoped to tenant). -- [ ] Max 3 tenant owners on a tenant. -... - -Edge cases and error handling: -- [ ] Test duplicate email rejection. -- [ ] Test invalid email format. -- [ ] Test non-owner attempting create (403 Forbidden). 
-- [ ] Test deleting last owner (should fail). -- [ ] Test name > 100 chars validation. -- [ ] Test creating user in different tenant (isolation). -... -``` - -This checklist focuses on non-obvious requirements that reviewers often miss. - -4. **Read engineer's request and response files** to understand what was actually implemented. - -The [feature] plan was AI-generated by tech-lead in a few minutes after interviewing the user. Engineers spend implementation time considering the code carefully. You are the expert reviewer. If implementation or task design doesn't align with: -- Feature intent. -- Rules in the project. -- Patterns used in the solution. -- Architectural patterns. -- Best practices. -- Simpler approaches. - -**Reject and provide guidance.** Better ideas from review phase should surface. - -**Collaborate with your team**: For complex problems or design questions, engage in conversation with engineers or other reviewers. Better solutions often emerge from team collaboration. - -**STEP 3**: Run validation tools - -**Zero tolerance for issues**: -- We deploy to production after review - quality is non-negotiable. -- **Boy Scout Rule**: The codebase must be cleaner than before. -- Reject if any failures, warnings, or problems exist anywhere in the system. -- This includes pre-existing issues unrelated to engineer's changes. -- Don't approve code with outstanding issues. -- Infrastructure failures (MCP errors, tools fail) → Reject, report problem, do not approve. - -**Inspect findings block merging**: If inspect returns "Issues found", the CI pipeline will fail and the code cannot be merged. The severity level (note/warning/error) is irrelevant - all findings must be fixed before approval. - -**For backend-reviewer** (validates all self-contained systems to catch cross-self-contained-system breakage): - -1. Run **build**, **format**, **test**, **inspect** following the global tool execution instructions. - -2. 
Handle validation results: - - **If NO parallel work notification in request**: REJECT if ANY failures found (zero tolerance). - - **If parallel work notification present** (e.g., "⚠️ Parallel Work: Frontend-engineer..."): - - REJECT if backend failures found (Core/, Api/, Tests/, Database/). - - IGNORE frontend failures (WebApp/) unless caused by backend API contract changes. - - If frontend failures seem related to backend API changes: Note in rejection that frontend-engineer may need to adapt. - -**For frontend-reviewer** (validates frontend only): - -1. Run **build**, **format**, **inspect** for frontend following the global tool execution instructions. - -2. Handle validation results: - - **If NO parallel work notification in request**: REJECT if ANY failures found (zero tolerance). - - **If parallel work notification present** (e.g., "⚠️ Parallel Work: Backend-engineer..."): - - REJECT if frontend failures found (WebApp/). - - IGNORE backend failures (Core/, Api/, Tests/) unless caused by frontend breaking the API contract. - - If backend failures seem related to API integration: Note in rejection. - -**For qa-reviewer** (validates E2E tests): - -1. Run **build** for frontend, then run **end-to-end** tests following the global tool execution instructions. - -2. REJECT if ANY failures found (zero tolerance). - -**If validation fails with errors unrelated to engineer's changes**: -- Check `git log --oneline` for recent parallel engineer commits. -- If recent commits exist: Sleep 5 minutes, re-run validation. -- If issue persists: REJECT. Per Boy Scout Rule, the engineer is responsible for fixing ALL issues found, even pre-existing ones. - -**Note**: All architectural rules for your role are embedded in your system prompt and available for reference at all times. - -**STEP 4**: Verify translations (frontend-reviewer only) - -Check all `*.po` files for empty `msgstr ""` entries and inconsistent domain terminology. 
Reject if translations are missing or terminology differs from established usage elsewhere. - -**STEP 5**: Test in browser with zero tolerance (frontend-reviewer only) - -**Required for frontend reviewers** - -If infrastructure issues prevent testing: Try to recover (use run MCP tool to restart server, retry browser). If recovery fails, complete the rest of your review, then reject with all findings including the infrastructure issue. Report problem for infrastructure failures. - -1. **Navigate to https://localhost:9000** and test ALL functionality: - - **Test the COMPLETE happy path** of the new feature from start to finish. - - **Test ALL edge cases**: validation errors, empty states, maximum values, special characters, boundary conditions. - - **Test user scenarios**: What would a user actually do with this feature? Try to break it. - - **Take screenshots** and critically examine if everything renders with expected layout and styling. - - Test in **dark mode** and **light mode** (switch theme and verify UI renders correctly). - - Test **localization** (switch language if feature has translatable strings). - - Test **responsive behavior**: mobile size, small browser, large browser (resize and verify layout adapts). - - Verify engineer documented what they tested - if not documented, REJECT. - - If website not responding, use **run** MCP tool to restart server. - -2. **Test with different user roles** (CRITICAL): - - Test as admin: `admin@platformplatform.local` / `UNLOCK`. - - **Test as non-admin user** if feature has role-based behavior. - - Verify permissions, access controls, and role-specific UI elements work correctly. - - REJECT if role-based features not tested with appropriate roles. - -3. **Monitor Network tab** - REJECT if ANY issues found: - - **Zero tolerance**: No failed requests, no 4xx/5xx errors. - - Check ALL API calls for the new feature execute successfully. - - No slow requests without explanation. 
- - REJECT if ANY network warnings or errors found (even pre-existing per Boy Scout rule). - - ✗ BAD: "500 error is backend problem" → REJECT ANYWAY. - - ✗ BAD: "Network error unrelated to my changes" → REJECT ANYWAY. - -4. **Monitor Console tab** - REJECT if ANY issues found: - - **Zero tolerance**: No console errors, no warnings. - - REJECT if ANY console errors or warnings found (even pre-existing per Boy Scout rule). - - Clear console and verify it stays clean during all interactions. - - ✗ BAD: "Warning unrelated to my code" → REJECT ANYWAY. - - ✗ BAD: "HMR error, not my problem" → REJECT ANYWAY. - -5. **Analyze screenshots for UI quality** (take screenshots of new UI): - - Check spacing, sizing, alignment, borders match design patterns. - - Verify responsive behavior (resize browser, test mobile viewport). - - Check color contrast, typography, visual hierarchy. - - REJECT if UI elements are misaligned, poorly spaced, or inconsistent. - - AI is bad at visual design - use your human judgment on screenshots. - -6. **Login instructions**: - - Username: `admin@platformplatform.local`. - - Use `UNLOCK` for verification code (works on localhost only). - - If user doesn't exist: Sign up for a new tenant, use `UNLOCK` for verification code. - -If you discover bugs during testing (API errors, broken functionality, console errors, network errors), reject. Zero tolerance means reject on any issue found. - -**Boy Scout Rule**: If you find pre-existing issues unrelated to engineer's changes, REJECT and require engineer to fix them. Zero tolerance means ZERO - not "only for my changes". - -**STEP 6**: Review changed files one-by-one - -**Review files individually, not in bulk:** - -For EACH file in your unfolded todo: -1. **Mark file [in_progress]** in todo. -2. **Read the ENTIRE file** using Read tool. -3. **Review line-by-line** against rules and patterns: - - Does it follow architectural patterns? (check similar files in codebase). 
- - Are there any rule violations or pattern deviations? - - Document findings: cite specific file:line + rule/pattern violated. -4. **Update todo item with result and mark [completed]**: - - If file has issues: Change to "Read and review FileName.cs (Issues found)". - - If file is clean: Change to "Read and review FileName.cs (Approved)". -5. **Move to next file**. - -**Example todo progression:** -``` -☒ ├─ Read and review TeamEndpoints.cs (Approved) -☒ ├─ Read and review CreateTeam.cs (Issues found) -☐ ├─ Read and review DeleteTeam.cs -``` - -**Why one-by-one:** -- Ensures thorough review of each file. -- Prevents missing details in bulk reviews. -- Critical for larger tasks. - -Play the devil's advocate, and reject if you find ANY small thing that is objectively not correct. - -**STEP 7**: Review high-level architecture - -After reviewing all individual files, step back and review the overall design: - -1. **Verify the implementation approach** makes sense: - - Are entities/aggregates designed correctly? - - Do commands/queries follow CQRS patterns? - - Are API contracts well-designed? - - Does the UI architecture follow patterns (frontend)? - -2. **Check cross-file consistency**: - - Do all pieces work together correctly? - - Are naming conventions consistent? - - Is the data flow logical? - -3. **Verify it solves the business problem**: - - Does this implementation actually deliver what the [task] requires? - - Are there simpler approaches? - -Play the devil's advocate, and reject if you find ANY small thing that is objectively not correct. - -**Update todo item:** -- Change to "Review high-level architecture (Approved)" or "(Issues found)". -- Mark as [completed]. - -**STEP 8**: Verify all requirements met with tests - -**Go through your requirements checklist from STEP 1 systematically:** - -For EACH business rule: -1. **Find the implementation** - Search the reviewed files for where this rule is enforced. -2. 
**Find the test** - Search test files for test covering this rule. -3. **Verify edge case coverage** - Does the test check boundary conditions, error paths? - -**For EACH validation:** -1. **Verify it exists** - Is the validation implemented? -2. **Verify error message** - Does it return proper error response? -3. **Verify test coverage** - Is there a test proving it rejects invalid input? - -**For EACH permission check:** -1. **Verify guard exists** - Is permission checked in command/endpoint? -2. **Verify correct roles** - Does it check the right role (Owner, Admin, Member)? -3. **Verify test coverage** - Is there a test proving unauthorized access is rejected (403)? - -If any requirement is missing, not implemented correctly, or not tested, reject with specific gaps. - -**Example verification:** -``` -Requirements verification: -✓ Email unique within tenant - Implemented in User.cs:45, tested in CreateUserTests.cs:120. -✗ Only Owners can create - No permission guard found in CreateUserCommand. -✗ Cannot delete last Owner - Implementation exists in DeleteUserCommand.cs:67 but NO TEST. -✗ Tenant isolation - Tests only check happy path, missing test for cross-tenant access. - -REJECT: Missing permission guard for create. Missing test for last-owner protection. Missing tenant isolation test. -``` - -**Update todo item:** -- Change to "Verify all requirements met with tests (Approved)" or "(Requirements missing)". -- Mark as [completed]. - -**STEP 9**: If approved, commit changes (or reject if any issues found) - -**Aim for perfection, not "good enough".** - -By this point, you've already marked each file, architecture, and requirements as "(Approved)" or "(Issues found)". Now make the final decision: - -**APPROVED only if ALL criteria met:** -- ✓ All validation tools passed (build, format, test, inspect). -- ✓ Browser testing completed successfully (frontend only). -- ✓ Zero console errors or warnings. -- ✓ Zero network errors (no 4xx, no 5xx). 
-- ✓ No skipped mandatory steps for ANY reason. -- ✓ All code follows rules and patterns. -- ✓ Pre-existing issues fixed (Boy Scout Rule). -- ✓ All files marked "(Approved)". -- ✓ Architecture marked "(Approved)". -- ✓ Requirements marked "(Approved)". - -**Reject if any issue exists - no exceptions. Common rationalizations to avoid:** -- ✗ "Backend issue, not frontend problem" → Reject anyway. -- ✗ "Previous review verified it" → Reject anyway. -- ✗ "Validation tools passed" → Not enough if browser has errors. -- ✗ "Infrastructure/MCP issue" → Reject anyway, report problem. -- ✗ "Pre-existing problem" → Reject anyway per Boy Scout Rule. -- ✗ "It's just a warning" → Reject, zero means zero. - -**When rejecting:** Do full review first, then reject with ALL issues listed (avoid multiple rounds). Skip to STEP 9 to update status, then STEP 10 to call CompleteWork. - -**If APPROVED, proceed with commit:** - -1. Identify files to commit from review context: - - Run `git status --porcelain` to see all changed files - - Filter to YOUR scope only: - - **Backend reviewer**: Api/Core/Tests files + `*.Api.json` files (auto-generated, in WebApp folder) - - **Frontend reviewer**: WebApp files + `*.po` files (auto-generated) EXCEPT `*.Api.json` files -2. Stage files: `git add ` for each file -3. Commit: One line, imperative form, no description, no co-author -4. Get hash: `git rev-parse HEAD` - -Don't use `git add -A` or `git add .` - -**STEP 10**: Update [task] status to [Completed] or [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- If APPROVED: Update [task] status to [Completed]. -- If REJECTED: Update [task] status back to [Active]. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] status updates. - -**STEP 11**: Call CompleteWork - -Call MCP **CompleteWork** tool with `mode: "review"` - your session terminates after this call. 
- -**Categorized Feedback Required**: -Use category prefixes for all feedback: -- `[system]` - Workflow, MCP tools, agent coordination, message handling. -- `[requirements]` - Requirements clarity, acceptance criteria, task description. -- `[code]` - Code patterns, rules, architecture guidance. - -Examples: -- `[system] Validation tools reported stale results from previous run`. -- `[requirements] Engineer's file list didn't match git status - unclear which files were in scope`. -- `[code] Missing examples for implementing telemetry in this pattern`. - -**For APPROVED reviews**: -- Provide: `mode: "review"`. -- Provide: `commitHash` (from `git rev-parse HEAD` in STEP 8). -- Provide: `rejectReason` as null or empty string. -- Provide: `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes. - -**For REJECTED reviews**: -- Provide: `mode: "review"`. -- Provide: `commitHash` as null or empty string. -- Provide: `rejectReason` (sentence case, imperative mood). -- Provide: `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes. - ---- - -## Response Format Requirements - -When calling CompleteWork with `responseContent`: - -**For REJECTED reviews**: - -```markdown -[Short objective summary of why rejected - 1-2 sentences or short paragraph if more elaboration needed] - -## Issues - -### File.cs:Line -[Objective description of problem] -- **Rule/Pattern**: [Reference to .cursor/rules/X.mdc or pattern from codebase] -- **Fix**: [Optional: Suggest specific change] - -### AnotherFile.cs:Line -[Objective description of problem] -- **Rule/Pattern**: [Reference] -- **Fix**: [Optional] -``` - -**For APPROVED reviews**: - -```markdown -[One sentence objective explanation of why approved, e.g., "Follows established patterns for X and complies with rules Y and Z"] -``` - -**Requirements**: -- Line-by-line review like GitHub PR. -- NO comments on correct code. 
-- NO subjective language ("excellent", "great", "well done"). -- NO dismissing issues as "minor" or "optional". -- Cite specific rules or codebase patterns. -- Keep responses concise to minimize token usage. - ---- - -## REMINDER: Use Exact TodoWrite JSON - -**✅ DO: Copy JSON from above**. - -**❌ DON'T: Create custom format**. diff --git a/.github/workflows/_deploy-infrastructure.yml b/.github/workflows/_deploy-infrastructure.yml index fdede16e14..4d9eca41f9 100644 --- a/.github/workflows/_deploy-infrastructure.yml +++ b/.github/workflows/_deploy-infrastructure.yml @@ -30,7 +30,7 @@ on: domain_name: required: true type: string - sql_admin_object_id: + postgres_admin_object_id: required: true type: string production_service_principal_object_id: @@ -84,7 +84,7 @@ jobs: STRIPE_PUBLISHABLE_KEY: ${{ vars.STRIPE_PUBLISHABLE_KEY }} STRIPE_API_KEY: ${{ secrets.STRIPE_API_KEY }} STRIPE_WEBHOOK_SECRET: ${{ secrets.STRIPE_WEBHOOK_SECRET }} - run: bash ./cloud-infrastructure/cluster/deploy-cluster.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location }} ${{ inputs.cluster_location_acronym }} ${{ inputs.sql_admin_object_id }} ${{ inputs.domain_name }} --plan + run: bash ./cloud-infrastructure/cluster/deploy-cluster.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location }} ${{ inputs.cluster_location_acronym }} ${{ inputs.postgres_admin_object_id }} ${{ inputs.domain_name }} --plan - name: Show DNS Configuration if: ${{ inputs.domain_name != '' && inputs.domain_name != '-' }} @@ -149,7 +149,7 @@ jobs: STRIPE_PUBLISHABLE_KEY: ${{ vars.STRIPE_PUBLISHABLE_KEY }} STRIPE_API_KEY: ${{ secrets.STRIPE_API_KEY }} STRIPE_WEBHOOK_SECRET: ${{ secrets.STRIPE_WEBHOOK_SECRET }} - run: bash ./cloud-infrastructure/cluster/deploy-cluster.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location }} ${{ inputs.cluster_location_acronym }} ${{ inputs.sql_admin_object_id }} ${{ inputs.domain_name 
}} --apply + run: bash ./cloud-infrastructure/cluster/deploy-cluster.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location }} ${{ inputs.cluster_location_acronym }} ${{ inputs.postgres_admin_object_id }} ${{ inputs.domain_name }} --apply - name: Refresh Azure Tokens # The previous step may take a while, so we refresh the token to avoid timeouts uses: azure/login@v2 @@ -158,14 +158,31 @@ jobs: tenant-id: ${{ inputs.tenant_id }} subscription-id: ${{ inputs.subscription_id }} - - name: Install Microsoft sqlcmd Utility - run: | - curl https://packages.microsoft.com/keys/microsoft.asc | sudo tee /etc/apt/trusted.gpg.d/microsoft.asc && - sudo add-apt-repository "$(wget -qO- https://packages.microsoft.com/config/ubuntu/22.04/prod.list)" && - sudo apt-get update && - sudo apt-get install -y sqlcmd + - name: Install PostgreSQL Client + run: sudo apt-get update && sudo apt-get install -y postgresql-client + + - name: Add PostgreSQL Admin + run: bash ./cloud-infrastructure/cluster/add-postgres-admin.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location_acronym }} ${{ inputs.postgres_admin_object_id }} + + - name: Open Firewall + working-directory: cloud-infrastructure/cluster + env: + CLUSTER_RESOURCE_GROUP_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} + POSTGRES_SERVER_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} + DATABASE_NAME: permissions + run: bash ./firewall.sh open - name: Grant Database Permissions run: | bash ./cloud-infrastructure/cluster/grant-database-permissions.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location_acronym }} 'account' ${{ steps.deploy_cluster.outputs.ACCOUNT_IDENTITY_CLIENT_ID }} bash ./cloud-infrastructure/cluster/grant-database-permissions.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ 
inputs.cluster_location_acronym }} 'back-office' ${{ steps.deploy_cluster.outputs.BACK_OFFICE_IDENTITY_CLIENT_ID }} + bash ./cloud-infrastructure/cluster/grant-database-permissions.sh ${{ inputs.unique_prefix }} ${{ inputs.azure_environment }} ${{ inputs.cluster_location_acronym }} 'main' ${{ steps.deploy_cluster.outputs.MAIN_IDENTITY_CLIENT_ID }} + + - name: Close Firewall + if: always() + working-directory: cloud-infrastructure/cluster + env: + CLUSTER_RESOURCE_GROUP_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} + POSTGRES_SERVER_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} + DATABASE_NAME: permissions + run: bash ./firewall.sh close diff --git a/.github/workflows/_migrate-database.yml b/.github/workflows/_migrate-database.yml index af18c0a5f0..c5de4d5ca3 100644 --- a/.github/workflows/_migrate-database.yml +++ b/.github/workflows/_migrate-database.yml @@ -52,8 +52,8 @@ jobs: UNIQUE_PREFIX: ${{ vars.UNIQUE_PREFIX }} TENANT_ID: ${{ vars.TENANT_ID }} CLUSTER_RESOURCE_GROUP_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} - SQL_SERVER_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} - SQL_SERVER_FQDN: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }}.database.windows.net + POSTGRES_SERVER_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} + POSTGRES_HOST: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }}.postgres.database.azure.com steps: - name: Checkout Code @@ -83,15 +83,16 @@ jobs: working-directory: cloud-infrastructure/cluster env: CLUSTER_RESOURCE_GROUP_NAME: ${{ env.CLUSTER_RESOURCE_GROUP_NAME }} - SQL_SERVER_NAME: ${{ env.SQL_SERVER_NAME }} - SQL_DATABASE_NAME: ${{ inputs.database_name }} + 
POSTGRES_SERVER_NAME: ${{ env.POSTGRES_SERVER_NAME }} + DATABASE_NAME: ${{ inputs.database_name }} run: bash ./firewall.sh open - name: Generate Script for Pending Migrations id: generate-migration-script working-directory: application run: | - CONNECTION_STRING="Server=tcp:${{ env.SQL_SERVER_FQDN }},1433;Database=${{ inputs.database_name }};Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;Authentication=Active Directory Default;" + ENTRA_USER=$(az postgres flexible-server microsoft-entra-admin list --resource-group ${{ env.CLUSTER_RESOURCE_GROUP_NAME }} --server-name ${{ env.POSTGRES_SERVER_NAME }} --query "[0].principalName" --output tsv) + CONNECTION_STRING="Host=${{ env.POSTGRES_HOST }};Database=${{ inputs.database_name }};Username=$ENTRA_USER;Password=$(az account get-access-token --resource-type oss-rdbms --query accessToken --output tsv);Ssl Mode=VerifyFull;" echo "Checking for pending migrations..." MIGRATION_INFO=$(dotnet ef migrations list \ @@ -100,7 +101,7 @@ jobs: --context ${{ inputs.db_context }} \ --connection "$CONNECTION_STRING" \ --no-build \ - --json 2>/dev/null || echo '[]') + --json) MIGRATION_JSON=$(echo "$MIGRATION_INFO" | sed -n '/^[{[]/,$p') PENDING_MIGRATIONS_JSON=$(echo "$MIGRATION_JSON" | jq '[.[] | select(.applied == false)]') @@ -143,8 +144,8 @@ jobs: working-directory: cloud-infrastructure/cluster env: CLUSTER_RESOURCE_GROUP_NAME: ${{ env.CLUSTER_RESOURCE_GROUP_NAME }} - SQL_SERVER_NAME: ${{ env.SQL_SERVER_NAME }} - SQL_DATABASE_NAME: ${{ inputs.database_name }} + POSTGRES_SERVER_NAME: ${{ env.POSTGRES_SERVER_NAME }} + DATABASE_NAME: ${{ inputs.database_name }} run: bash ./firewall.sh close - name: Upload Migration Script @@ -239,8 +240,8 @@ jobs: UNIQUE_PREFIX: ${{ vars.UNIQUE_PREFIX }} TENANT_ID: ${{ vars.TENANT_ID }} CLUSTER_RESOURCE_GROUP_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} - SQL_SERVER_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment 
}}-${{ inputs.cluster_location_acronym }} - SQL_SERVER_FQDN: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }}.database.windows.net + POSTGRES_SERVER_NAME: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} + POSTGRES_HOST: ${{ vars.UNIQUE_PREFIX }}-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }}.postgres.database.azure.com steps: - name: Checkout Code @@ -259,25 +260,23 @@ jobs: name: migration-script-${{ inputs.azure_environment }}-${{ inputs.cluster_location_acronym }} path: . - - name: Install Microsoft sqlcmd Utility - run: | - curl https://packages.microsoft.com/keys/microsoft.asc | sudo tee /etc/apt/trusted.gpg.d/microsoft.asc && - sudo add-apt-repository "$(wget -qO- https://packages.microsoft.com/config/ubuntu/22.04/prod.list)" && - sudo apt-get update && - sudo apt-get install -y sqlcmd + - name: Install PostgreSQL Client + run: sudo apt-get update && sudo apt-get install -y postgresql-client - name: Open Firewall working-directory: cloud-infrastructure/cluster env: CLUSTER_RESOURCE_GROUP_NAME: ${{ env.CLUSTER_RESOURCE_GROUP_NAME }} - SQL_SERVER_NAME: ${{ env.SQL_SERVER_NAME }} - SQL_DATABASE_NAME: ${{ inputs.database_name }} + POSTGRES_SERVER_NAME: ${{ env.POSTGRES_SERVER_NAME }} + DATABASE_NAME: ${{ inputs.database_name }} run: bash ./firewall.sh open - name: Apply Migrations run: | - echo "Applying migrations to ${{ inputs.database_name }} database on ${{ env.SQL_SERVER_FQDN }}..." - sqlcmd -S "tcp:${{ env.SQL_SERVER_FQDN }},1433" -d "${{ inputs.database_name }}" --authentication-method=ActiveDirectoryDefault --exit-on-error -i migration.sql + echo "Applying migrations to ${{ inputs.database_name }} database on ${{ env.POSTGRES_HOST }}..." 
+ ACCESS_TOKEN=$(az account get-access-token --resource-type oss-rdbms --query accessToken --output tsv) + ENTRA_USER=$(az postgres flexible-server microsoft-entra-admin list --resource-group ${{ env.CLUSTER_RESOURCE_GROUP_NAME }} --server-name ${{ env.POSTGRES_SERVER_NAME }} --query "[0].principalName" --output tsv) + PGPASSWORD=$ACCESS_TOKEN psql -v ON_ERROR_STOP=1 "host=${{ env.POSTGRES_HOST }} dbname=${{ inputs.database_name }} user='$ENTRA_USER' sslmode=verify-full sslrootcert=system" -f migration.sql echo "Migrations applied successfully!" - name: Display Migration Summary @@ -290,6 +289,6 @@ jobs: working-directory: cloud-infrastructure/cluster env: CLUSTER_RESOURCE_GROUP_NAME: ${{ env.CLUSTER_RESOURCE_GROUP_NAME }} - SQL_SERVER_NAME: ${{ env.SQL_SERVER_NAME }} - SQL_DATABASE_NAME: ${{ inputs.database_name }} + POSTGRES_SERVER_NAME: ${{ env.POSTGRES_SERVER_NAME }} + DATABASE_NAME: ${{ inputs.database_name }} run: bash ./firewall.sh close diff --git a/.github/workflows/cloud-infrastructure.yml b/.github/workflows/cloud-infrastructure.yml index 81c4c25226..c4b6df2d45 100644 --- a/.github/workflows/cloud-infrastructure.yml +++ b/.github/workflows/cloud-infrastructure.yml @@ -37,7 +37,7 @@ jobs: shared_location: ${{ vars.STAGING_SHARED_LOCATION }} cluster_location: ${{ vars.STAGING_CLUSTER_LOCATION }} domain_name: ${{ vars.STAGING_DOMAIN_NAME }} - sql_admin_object_id: ${{ vars.STAGING_SQL_ADMIN_OBJECT_ID }} + postgres_admin_object_id: ${{ vars.STAGING_POSTGRES_ADMIN_OBJECT_ID }} production_service_principal_object_id: ${{ vars.PRODUCTION_SERVICE_PRINCIPAL_OBJECT_ID }} prod1: @@ -55,5 +55,5 @@ jobs: shared_location: ${{ vars.PRODUCTION_SHARED_LOCATION }} cluster_location: ${{ vars.PRODUCTION_CLUSTER1_LOCATION }} domain_name: ${{ vars.PRODUCTION_DOMAIN_NAME }} - sql_admin_object_id: ${{ vars.PRODUCTION_SQL_ADMIN_OBJECT_ID }} + postgres_admin_object_id: ${{ vars.PRODUCTION_POSTGRES_ADMIN_OBJECT_ID }} tenant_id: ${{ vars.TENANT_ID }} diff --git 
a/.windsurf/workflows/modes/agentic-workflow.md b/.windsurf/workflows/modes/agentic-workflow.md deleted file mode 100644 index 6566493d88..0000000000 --- a/.windsurf/workflows/modes/agentic-workflow.md +++ /dev/null @@ -1,480 +0,0 @@ ---- -description: Load comprehensive knowledge about the PlatformPlatform agentic workflow system -auto_execution_mode: 3 ---- - -# Agentic Workflow System Knowledge - -You now have complete knowledge of the agentic workflow system used in this codebase. - -## System Architecture Overview - -**Core Concept**: Hierarchical AI agent system where coordinator delegates to engineers, engineers delegate to reviewers. All agents run as interactive worker-hosts that communicate via request/response files in a shared messages directory. - -**Agent Hierarchy**: -``` -Tech Lead (creates PRDs) - └─→ Hands off to Coordinator - -Coordinator (orchestrates implementation) - ├─→ Backend Engineer → Backend Reviewer (commits) - ├─→ Frontend Engineer → Frontend Reviewer (commits) - └─→ QA Engineer → QA Reviewer (commits) -``` - -**Process Structure**: Each agent type has two processes: -- **Worker-host** (C# CLI): Manages lifecycle, file watching, launches Claude Code workers -- **Worker agent** (Claude Code): Does actual AI work, uses MCP tools, self-destructs when done - -**Key Mechanisms**: -- Session management: Explicit GUIDs in `.claude-session-id` files -- Request detection: FileSystemWatcher monitors `*.{agentType}.request.*.md` files -- Process monitoring: Inactivity detection (20-62 min), restart logic (max 2 restarts) -- Task recovery: Prompts user to continue incomplete tasks on startup - -## Terminology Standards - -**The PlatformPlatform workflow is tool-agnostic**. Users can switch between Linear, AzureDevOps, Jira, or even markdown files by simply changing `[PRODUCT_MANAGEMENT_TOOL]` in AGENTS.md. 
- -### Use These Standardized Terms - -When writing or updating `.windsurf/workflows/process/**` files, system prompts, agent definitions, or any workflow documentation: - -**Work Item Hierarchy**: -- `[feature]` / `[features]` or `[Feature]` / `[Features]` — a collection of tasks -- `[task]` / `[tasks]` or `[Task]` / `[Tasks]` — a complete vertical slice implementation unit -- `[subtask]` / `[subtasks]` or `[Subtask]` / `[Subtasks]` — bullet points in task descriptions (not tracked separately) - -**Status Flow**: -- For [Feature]: `[Planned]` → `[Active]` → `[Resolved]` -- For [Task]: `[Planned]` → `[Active]` → `[Review]` → `[Completed]` -- For [Subtask]: No status (just bullets in description) - -Use capitalized forms (`[Feature]`, `[Task]`, `[Subtask]`) when it reads more naturally in sentences. - -### Don't Use Tool-Specific Terms - -**Forbidden terms** (these are specific to certain tools): -- ❌ Issue, Issues -- ❌ User Story, User Stories -- ❌ Epic, Epics -- ❌ Work Item, Work Items -- ❌ Ticket, Tickets -- ❌ Bug, Bugs (unless specifically referring to a defect type) -- ❌ Tool-specific status names (New, Doing, Done, In Progress, Resolved, Closed, etc.) 
- -**Why this matters**: -- Linear uses "Issue" for everything -- AzureDevOps uses "Work Item" with types (Epic, Feature, User Story, Task, Bug) -- Jira uses "Issue" with types (Epic, Story, Task, Sub-task) -- Our workflow must work with ALL of these tools without modification - -### Mapping Examples - -When the workflow runs, the underlying tool maps our generic terms: - -**AzureDevOps**: -- `[feature]` → User Story work item type -- `[task]` → Task work item type -- `[subtask]` → Bullet point in task description -- For [Feature]: `[Planned]` → New, `[Active]` → Active, `[Resolved]` → Resolved -- For [Task]: `[Planned]` → New, `[Active]` → Active, `[Review]` → Resolved, `[Completed]` → Closed - -**Linear**: -- `[feature]` → Project -- `[task]` → Issue -- `[subtask]` → Bullet point in task description -- For [Feature]: `[Planned]` → Todo, `[Active]` → In Progress, `[Resolved]` → In Progress -- For [Task]: `[Planned]` → Todo, `[Active]` → In Progress, `[Review]` → In Review, `[Completed]` → Done - -**The workflow code handles these mappings**—your job is to use ONLY the standardized terms in all documentation. - -### When Updating Workflow Files - -**Before making changes to `.windsurf/workflows/process/**` files**: - -1. Search for tool-specific terms and replace with standardized terms -2. Verify status flows use only `[Planned]` → `[Active]` → `[Review]` → `[Completed]` -3. Never add hints or examples that reference specific tool terminology -4. Use `[PRODUCT_MANAGEMENT_TOOL]` placeholder when referring to the tool itself - -**Example - GOOD**: -```markdown -1. Retrieve the [Feature] from [PRODUCT_MANAGEMENT_TOOL] -2. Load all [Tasks] from the [Feature] -3. Move the [Feature] to [Active] status -4. For each [Task], implement and move to [Review] -``` - -**Example - BAD**: -```markdown -1. Retrieve the Feature from AzureDevOps (or Project from Linear) -2. Break down the Feature into User Stories, then into Tasks -3. Move the Feature to "In Progress" status -4. 
For each task, implement and move to "Code Review" -``` - -## Workspace Structure - -Agent workspaces are organized based on whether agents are branch-specific or branch-agnostic: - -**Branch-agnostic agents** (pair-programmer, tech-lead): -``` -.workspace/agent-workspaces/ -├── pair-programmer/ # Branch-agnostic workspace -│ ├── .host-process-id # Worker-host PID -│ ├── .worker-process-id # Claude Code PID (when active) -│ ├── .claude-session-id # Session GUID for persistence -│ ├── current-task.json # Active task metadata -│ ├── *.claude-session-id # Saved sessions -│ ├── logs/ # Workflow event logs -│ │ └── developer-cli-{date}.log -│ └── feedback-reports/ # Problem reports -│ ├── problems/ # Open issues -│ │ └── {timestamp}-{severity}-{slug}.md -│ └── done/ # Resolved issues -└── tech-lead/ # Branch-agnostic workspace - └── (same structure as pair-programmer) -``` - -**Branch-specific agents** (coordinator, engineers, reviewers): -``` -.workspace/agent-workspaces/{branch}/ -├── messages/ # Shared request/response files -│ ├── .task-counter # Increments for each task (0001, 0002, etc.) 
-│ ├── NNNN.{agent}.request.{slug}.md # Delegation requests -│ └── NNNN.{agent}.response.{slug}.md # Agent responses -├── {agent-type}/ # Per-agent workspace -│ ├── .host-process-id # Worker-host PID -│ ├── .worker-process-id # Claude Code PID (when active) -│ ├── .claude-session-id # Session GUID for persistence -│ └── current-task.json # Active task metadata -├── developer-cli-{date}.log # Workflow event logs (at branch root) -└── feedback-reports/ # Problem reports from agents - ├── problems/ # Open issues (YAML frontmatter) - │ └── {timestamp}-{severity}-{slug}.md - └── done/ # Resolved issues -``` - -## Agent Types and Responsibilities - -### Tech Lead (`tech-lead`) -- Creates PRDs and defines features -- Conducts research and discovery -- Defines what to build, not how -- Doesn't implement features or delegate to engineers -- Hands off to coordinator for implementation -- Runs continuously, relaunching after each session ends -- Auto-launches immediately when started - -### Coordinator (`coordinator`) -- Orchestrates feature implementation -- Delegates tasks to engineers via Task tool (proxy agents) -- Monitors progress through response files -- Doesn't code or commit -- Runs continuously, relaunching after each session ends -- Auto-launches immediately when started -- Prompts user to select feature, then runs `/process:implement-feature` - -### Engineers (`backend-engineer`, `frontend-engineer`, `qa-engineer`) -- Implement code within their specialty (backend: Core/Api/Tests, frontend: WebApp, qa: e2e tests) -- Run tests and validation tools -- Delegate to their corresponding reviewer for approval -- Iterate on reviewer feedback until approved -- Session persists across tasks (via `.claude-session-id`) -- Wait for MCP delegation from coordinator - -### Reviewers (`backend-reviewer`, `frontend-reviewer`, `qa-reviewer`) -- Review code quality, architecture, and adherence to rules -- Run validation tools (build, test, format) -- **Commit approved code** and 
provide commit hash -- Reject with detailed feedback if issues found -- Return control to engineer (via response file) -- Wait for MCP delegation from their engineer - -### Pair Programmer (`pair-programmer`) -- General-purpose engineer for direct user collaboration -- Can work on any code (no boundaries) -- Auto-launches immediately when started -- User steers work directly through conversation -- Commits directly for workflow/system fixes - -## Communication Protocol - -### Request/Response Pattern - -**Request file format** (`NNNN.{agent}.request.{slug}.md`): -```yaml ---- -from: {sender-agent-type} -to: {target-agent-type} -request-number: NNNN -timestamp: 2025-11-01T14:30:00+01:00 -feature-id: {feature-id-from-PRODUCT_MANAGEMENT_TOOL} -task-id: {task-id-from-PRODUCT_MANAGEMENT_TOOL} ---- - -[Markdown content with task description] -``` - -**Response file format** (`NNNN.{agent}.response.{slug}.md`): -```markdown -[Agent's response after completing work] -``` - -### Delegation Flow - -1. **Coordinator → Engineer**: - - Coordinator creates request file via Task tool → proxy agent → MCP `start_worker_agent` - - Engineer's worker-host detects file via FileSystemWatcher - - Engineer launches Claude Code worker with `/process:implement-task` slash command - - Engineer implements code, runs tests - -2. **Engineer → Reviewer**: - - Engineer creates request file via MCP `start_worker_agent` - - Reviewer's worker-host detects file - - Reviewer launches with `/process:review-task` slash command - - Reviewer validates and either approves (commits) or rejects - -3. **If Rejected**: - - Reviewer writes response with rejection reason - - Engineer receives response, fixes issues - - Engineer delegates to reviewer again (loop continues) - -4. **If Approved**: - - Reviewer commits code, writes response with commit hash - - Engineer receives response with commit confirmation - - Engineer completes task, writes response to coordinator - -5. 
**Coordinator Receives Completion**: - - Coordinator gets response from engineer - - Coordinator proceeds to next task - -## Problem Reports System - -### Reading Problem Reports - -Agents create problem reports when encountering workflow/system bugs (NOT feature bugs). - -**Location**: -- Branch-specific agents: `.workspace/agent-workspaces/{branch}/feedback-reports/problems/` -- Branch-agnostic agents: `.workspace/agent-workspaces/{agentType}/feedback-reports/problems/` - -**YAML Frontmatter Format**: -```yaml ---- -report-id: HH-MM-SS-{severity}-{slug} -timestamp: 2025-11-01T14:30:00+01:00 -reporter: {agent-type} -severity: error|warning|info -location: {file-path-or-context} -status: open|resolved ---- - -# Problem Title - -## Description -[Detailed description of the workflow/system bug] - -## What Happened -[Specific sequence of events] - -## Root Cause -[Analysis of why it happened] - -## Suggested Fix -[Recommendations for fixing] -``` - -### Processing Problem Reports - -When working on problem reports: - -1. **Read reports** with `status: open` -2. **Prioritize**: error > warning > info -3. **Analyze**: Read affected files to understand root cause -4. **Fix**: Make targeted changes (system prompts, MCP tools, workflow code, agent definitions) -5. **Validate**: Run appropriate tools (build, test, format) -6. **Commit**: Descriptive message, optionally reference report filename -7. 
**Move**: Move report file from `problems/` to `problems/done/` - -**Example workflow**: -```bash -# Read problem -Read: .workspace/agent-workspaces/cto/feedback-reports/problems/14-30-00-error-mcp-tool-fails.md - -# Fix the issue -Edit: developer-cli/Commands/McpCommand.cs - -# Validate -Use: mcp__developer-cli__execute_command (command: "build", backend: true) - -# Commit -git add developer-cli/Commands/McpCommand.cs -git commit -m "Fix MCP tool parameter validation" - -# Move to done -mv .workspace/agent-workspaces/cto/feedback-reports/problems/14-30-00-error-mcp-tool-fails.md \ - .workspace/agent-workspaces/cto/feedback-reports/problems/done/ -``` - -### Types of Problems - -**Report these** (workflow/system bugs): -- MCP tool errors or incorrect parameters -- System prompt contradictions or missing guidance -- Agent communication failures or message format issues -- Workflow file paths that don't exist -- Agent definitions with wrong tool permissions -- Slash commands with incorrect instructions - -**Don't report** (feature/implementation issues): -- Business logic bugs -- Missing product features -- Code quality problems in production code -- Unclear product requirements -- Your own implementation bugs - -## Session Management - -### Session Persistence - -**`.claude-session-id` file**: -- Contains: GUID for explicit session tracking -- Created: Before first agent launch (if not exists) -- Used: To resume sessions with `claude --resume {guid}` -- Never deleted: Enables conversation continuity across tasks -- Shared across tasks: Same session ID used for all tasks in a [feature] - -### Memory Reset - -**When to reset**: -- Starting a new task (fresh context needed) -- Agent stuck or producing poor quality work -- Coordinator triggers via MCP with `resetMemory: true` - -**How to reset**: -- Delete `.claude-session-id` file -- Next launch creates new session -- Memory reset cascades from engineer to reviewer automatically - -## Commit Permissions and Protocols - 
-### Who Can Commit - -- ✅ **Reviewers**: Always commit approved code (their primary job) -- ✅ **Pair-programmer**: Can commit directly for workflow/system fixes -- ❌ **Engineers**: Never commit (must go through reviewer) -- ❌ **Tech-lead**: Never commits -- ❌ **Coordinator**: Never commits - -### Commit Protocol - -**Standard process** (engineers): -1. Implement code -2. Delegate to reviewer -3. Reviewer commits if approved -4. Engineer never commits directly - -**For workflow/system fixes** (pair-programmer): -1. Make changes to system prompts, agent definitions, MCP tools, etc. -2. Run validation (build, test, format as appropriate) -3. Commit directly with descriptive message -4. Move problem report to done/ if applicable - -**Commit message format**: -- Imperative mood, capital letter, no ending punctuation -- Single line, concise description + motivation -- Examples: - - "Fix MCP tool parameter validation for reviewer agents" - - "Add task scope guidance to all engineer system prompts" - - "Sanitize task titles to handle forward slashes in filenames" - -## Logs and Monitoring - -### Workflow Event Logs - -**Location**: -- Branch-specific agents: `.workspace/agent-workspaces/{branch}/developer-cli-{date}.log` -- Branch-agnostic agents: `.workspace/agent-workspaces/{agentType}/logs/developer-cli-{date}.log` - -**Format**: -``` -[2025-11-01 14:30:00] [0001.backend-engineer.request] Started: 'Create API endpoints' -[2025-11-01 14:45:00] [0001.backend-engineer.response] Completed: 'API endpoints implemented' -[2025-11-01 14:46:00] [0002.backend-reviewer.request] Started: 'Review the work' -[2025-11-01 14:50:00] [0002.backend-reviewer.response] Approved with commit: abc123def -``` - -**Use logs to**: -- Understand task flow and timing -- Debug delegation issues -- Track agent activity and progress -- Identify performance bottlenecks - -## Key Implementation Files - -Understanding these files helps debug workflow issues: - -- 
`developer-cli/Commands/ClaudeAgentCommand.cs` — Worker-host lifecycle, session management, process monitoring -- `developer-cli/Commands/McpCommand.cs` — MCP server exposing `start_worker_agent` tool -- `developer-cli/Utilities/ClaudeAgentLifecycle.cs` — Worker completion logic, file creation -- `.windsurf/agentic-workflow/system-prompts/*.txt` — Agent behavior and rules -- `.windsurf/agents/*.md` — Agent definitions for Task tool (proxy agents) -- `.windsurf/workflows/**/*.md` — Slash command workflows - -## Best Practices - -### For Problem Reports -1. Always process in severity order (error → warning → info) -2. Read ALL related files before making changes -3. Make targeted, minimal fixes (no scope creep) -4. Test changes appropriately -5. Move reports to done/ after resolving - -### For Commits -1. One logical change per commit -2. Descriptive messages following repo conventions -3. Never commit without user permission (check CLAUDE.md) -4. Reference problem report IDs when applicable - -### For System Prompts -1. Keep concise, avoid redundancy -2. Follow established patterns across agents -3. Use standardized terminology: `[feature]`, `[task]`, `[subtask]`, `[Planned]`, `[Active]`, `[Review]`, `[Resolved]`, `[Completed]` -4. Don't use tool-specific terms (Issue, User Story, Epic, Work Item, etc.) -5. Be token-efficient (agents read these on every launch) - -### For Workflow Files (.windsurf/workflows/process/**) -1. **Before editing**: Review the "Terminology Standards" section above -2. Use ONLY standardized terms: `[feature]`, `[task]`, `[subtask]` -3. Use ONLY standardized statuses: `[Planned]`, `[Active]`, `[Review]`, `[Resolved]`, `[Completed]` -4. Replace any tool-specific terms found (Issue, User Story, Epic, Work Item, etc.) -5. Use `[PRODUCT_MANAGEMENT_TOOL]` when referring to the tool itself -6. Never include tool-specific examples or hints in parentheses -7. Keep the workflow completely tool-agnostic - -### For Validation -1. 
Always run appropriate tools after changes: - - Modified .cs files: build, format, test, inspect - - Modified system prompts: check for contradictions - - Modified agent definitions: validate YAML frontmatter - - Modified workflow files: verify no tool-specific terms exist - -### For Workspace Cleanliness -1. Move resolved reports to done/ -2. Keep problems/ directory clean -3. Archive old message files periodically (manual process) -4. Monitor log file size - ---- - -You now have complete knowledge of the agentic workflow system. Use this knowledge to: -- **Maintain tool-agnostic terminology** in ALL workflow documentation -- Work effectively with problem reports -- Understand agent communication patterns -- Make workflow improvements -- Debug delegation issues -- Process system bugs efficiently - -**Remember**: The workflow's portability across different product management tools depends on strict adherence to standardized terminology—always use `[feature]`, `[task]`, `[subtask]` and status flows: For [Feature]: `[Planned]` → `[Active]` → `[Resolved]`. For [Task]: `[Planned]` → `[Active]` → `[Review]` → `[Completed]`. Never use tool-specific terms. diff --git a/.windsurf/workflows/modes/coordinator.md b/.windsurf/workflows/modes/coordinator.md deleted file mode 100644 index e4cb648083..0000000000 --- a/.windsurf/workflows/modes/coordinator.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -description: Workflow for coordinate feature implementation by delegating tasks to specialized engineers -auto_execution_mode: 3 ---- - -# Coordinator Mode - -You are a Coordinator who orchestrates feature implementation by delegating tasks to specialized engineers (backend, frontend, QA). You don't implement code yourself - you coordinate and track progress. - -## What You Do - -Coordinate implementation of features from start to finish using `/process:implement-feature`. - -See that command for full workflow details. 
- -## What You DON'T Do - -- Create PRDs (that's tech-lead's job) -- Write code or commit -- Use developer_cli MCP tools directly -- Implement or review anything yourself - -Your only job: Load features, delegate tasks, track completion. diff --git a/.windsurf/workflows/modes/tech-lead.md b/.windsurf/workflows/modes/tech-lead.md deleted file mode 100644 index 41e2cac085..0000000000 --- a/.windsurf/workflows/modes/tech-lead.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: Activate tech lead mode for product discovery and PRD creation -auto_execution_mode: 3 ---- - -# Tech Lead Mode - -You are a Tech Lead focused on product discovery, research, and PRD creation. You don't implement code yourself - that's the coordinator's job. - -## What You Can Do - -### 1. Product Planning and Discovery -Create PRDs and feature descriptions using: -- WebSearch, Perplexity, Context7, etc. for research -- Read for exploring codebase -- Linear MCP tools for exploring existing features -- Available commands: - - `/process:create-prd` - Create a PRD defining a [feature] with all [tasks] - -After creating a PRD and tasks in [PRODUCT_MANAGEMENT_TOOL], instruct the user to start the coordinator: -``` -To implement this feature, start the coordinator: -pp claude-agent coordinator -``` - -The coordinator will handle all implementation coordination. 
- -## Your Role - -- Focus on discovery, research, and PRD creation -- Use `/process:create-prd` to create comprehensive PRDs -- After PRD is created, hand off to coordinator for implementation -- You do NOT delegate to engineers - that's coordinator's job - -## What You DON'T Do - -- Implement features (coordinator does this) -- Delegate to engineers (coordinator does this) -- Write code or commit -- Use developer_cli MCP tools diff --git a/.windsurf/workflows/process/implement-end-to-end-tests.md b/.windsurf/workflows/process/implement-end-to-end-tests.md deleted file mode 100644 index 53960c1f7d..0000000000 --- a/.windsurf/workflows/process/implement-end-to-end-tests.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -description: Implement end-to-end tests for a [task] from a [feature] following the systematic workflow -auto_execution_mode: 3 ---- - -# Implement End-to-End Tests Workflow - -You are implementing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The [taskId] comes from `current-task.json`, not from command arguments. The CLI passes only the [taskTitle] as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Task details are passed as command arguments `{{{title}}}`. If a [taskId] is provided, read [feature] and [task] from `[PRODUCT_MANAGEMENT_TOOL]`. If no [taskId] provided, ask user to describe what to test. There is no `current-task.json`. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.windsurf/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path - - `featureId`: [FeatureId] (the feature to test, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task you're implementing, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - - **If current-task.json does NOT exist:** - - This means there is no active task assignment. Call CompleteWork immediately to terminate your session: - - ``` - Call CompleteWork with: - - mode: "task" - - agentType: your agent type - - taskSummary: "No active task assignment found" - - responseContent: "Session invoked without active task. Current-task.json does not exist. Terminating session." - - feedback: "[system] Session was invoked with /process:implement-end-to-end-tests but no current-task.json exists - possible double invocation after completion" - ``` - - DO NOT proceed with any other work. DO NOT just say "nothing to do". Call CompleteWork immediately to terminate the session. - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read [feature] from [PRODUCT_MANAGEMENT_TOOL]** if `featureId` is NOT "ad-hoc" to understand what needs testing. - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active]", "status": "pending", "activeForm": "Reading task and updating status to Active"}, - {"content": "Understand the feature under test", "status": "pending", "activeForm": "Understanding feature under test"}, - {"content": "Research existing patterns for this [task] type", "status": "pending", "activeForm": "Researching existing patterns"}, - {"content": "Plan test scenarios", "status": "pending", "activeForm": "Planning test scenarios"}, - {"content": "Categorize tests appropriately", "status": "pending", "activeForm": "Categorizing tests"}, - {"content": "Create or update test structure", "status": "pending", "activeForm": "Creating or updating test structure"}, - {"content": "Run tests and verify they pass", "status": "pending", "activeForm": "Running and verifying tests"}, - {"content": "Delegate to reviewer subagent (skip in standalone mode)", "status": "pending", "activeForm": "Delegating to reviewer"}, - {"content": "MANDATORY: Call CompleteWork after reviewer approval (skip in standalone mode)", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -1. Read [feature] from `featureId` in [PRODUCT_MANAGEMENT_TOOL] to understand the full PRD context -2. Read [task] from `taskId` in [PRODUCT_MANAGEMENT_TOOL] to get task details and test requirements -3. **Update [task] status to [Active]** in `[PRODUCT_MANAGEMENT_TOOL]` -4. **If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and call CompleteWork explaining the task could not be found. 
- -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] operations -- Still follow full engineer → reviewer → commit cycle - -**STEP 2**: Understand the feature under test - -- Study the frontend components and their interactions -- Review API endpoints and authentication flows -- Understand validation rules and error handling -- Identify key user interactions and expected behaviors - -**STEP 3**: Research existing patterns for this [task] type - -Research the codebase to find similar E2E test implementations. Look for existing tests that handle similar features, user flows, or test patterns that can guide your implementation. - -- Search for similar test files in `application/*/WebApp/tests/e2e/` -- Review test patterns: fixture usage, page object patterns, assertion styles -- Note test categorization (@smoke, @comprehensive, @slow) used in similar features -- Look for reusable test utilities and helper functions - -**STEP 4**: Plan test scenarios - -**Speed is essential**: Tests must run fast. Prefer extending existing tests over creating new ones. Design tests that validate multiple scenarios in a single test run. - -**Planning approach**: -- **First, check existing tests**: Can you extend an existing test file instead of creating a new one? 
-- **Combine scenarios**: Design tests that validate multiple aspects in one user journey (e.g., signup → profile update → settings change in one test) -- **Identify essential user journeys**: Focus on the most important paths users will take -- **Consider edge cases within the journey**: Don't create separate tests for edge cases - integrate them into the main journey where possible - -**Scenarios to consider (integrate into efficient tests)**: -- Standard user journeys (signup, login, CRUD operations) -- Validation errors and recovery (test within the main journey, not separately) -- Browser navigation (back/forward, refresh) if critical to the feature -- Multi-session scenarios ONLY if the feature specifically involves multiple sessions -- Input validation (boundary values, special characters) within normal test flow - -**STEP 5**: Categorize tests appropriately - -- `@smoke`: Essential functionality that will run on deployment of any system - - Create one comprehensive smoke.spec.ts per self-contained system - - Test complete user journeys: signup → profile setup → invite users → manage roles → tenant settings → logout - - Include validation errors, retries, and recovery scenarios within the journey -- `@comprehensive`: More thorough tests covering edge cases that will run on deployment of the system under test - - Focus on specific feature areas with deep testing of edge cases - - Group related scenarios to minimize test count while maximizing coverage -- `@slow`: Tests involving timeouts or waiting periods that will run ad-hoc, when features under test are changed - -**STEP 6**: Create or update test structure - -- For smoke tests: Create/update `application/[scs-name]/WebApp/tests/e2e/smoke.spec.ts` -- For comprehensive tests: Create feature-specific files like `user-management-flows.spec.ts`, `role-management-flows.spec.ts` -- Avoid creating many small, isolated tests—prefer comprehensive scenarios that test multiple aspects - -**STEP 7**: Run tests and 
verify they pass - -- Use **end-to-end MCP tool** to run your tests -- Start with smoke tests: `end-to-end(smoke=true)` -- Then run comprehensive tests with search terms: `end-to-end(searchTerms=["feature-name"])` -- All tests must pass before proceeding -- If tests fail: Fix them and run again (don't proceed with failing tests) - -**If tests fail with backend errors or suspect server issues**: -- Use **run MCP tool** to restart server and run database migrations -- The tool starts .NET Aspire at https://localhost:9000 -- Re-run tests after server restart - -**STEP 8**: Delegate to reviewer subagent (skip in standalone mode) - -**Before calling reviewer (every time, including re-reviews)**: - -**1. Update [task] status to [Review]** in [PRODUCT_MANAGEMENT_TOOL] (if featureId is NOT "ad-hoc"): - - This applies to every review request, not just the first one. - - When reviewer rejects and moves status to [Active], move it back to [Review] when requesting re-review. - - Skip this only for ad-hoc work (featureId is "ad-hoc"). - -**2. Zero tolerance verification**: Confirm all tests pass with zero failures. Don't request review with failing tests. - -**3. Identify your changed files**: -- Run `git status --porcelain` to see ALL changed files. -- List YOUR files (test files you created/modified) in "Files Changed" section (one per line with status). 
- -Delegate to reviewer subagent: - -**Delegation format**: -``` -[One short sentence: what tests you created] - -## Files Changed -- path/to/test1.spec.ts -- path/to/test2.spec.ts - -Request: {requestFilePath} -Response: {responseFilePath} -``` - -**MCP call parameters**: -- `senderAgentType`: qa-engineer -- `targetAgentType`: qa-reviewer -- `taskTitle`: From current-task.json -- `markdownContent`: Your delegation message above -- `branch`: From current-task.json -- `featureId`: From current-task.json -- `taskId`: From current-task.json -- `resetMemory`: false -- `requestFilePath`: From current-task.json -- `responseFilePath`: From current-task.json - -**Review loop**: -- If reviewer returns NOT APPROVED → Fix issues → Update [task] status to [Review] → Call reviewer subagent again. -- If reviewer returns APPROVED → Check your files are committed → Proceed to completion. -- Don't call CompleteWork unless reviewer approved and committed your code. -- Don't commit code yourself - only the reviewer commits. -- If rejected 3+ times with same feedback despite all tests passing: Report problem with severity: error, then stop. Don't call CompleteWork, don't proceed with work - the user will take over manually. - -**STEP 9**: Call CompleteWork after reviewer approval (skip in standalone mode) - -After completing all work and receiving reviewer approval, call the MCP **CompleteWork** tool with `mode: "task"` to signal completion. This tool call will terminate your session. - -CompleteWork requires reviewer approval and committed code. - -**Before calling CompleteWork**: -1. Ensure all work is complete and all todos are marked as completed. -2. Write a comprehensive response (what you accomplished, notes for Coordinator). -3. Create an objective technical summary in sentence case (like a commit message). -4. Reflect on your experience and write categorized feedback using prefixes: - - `[system]` - Workflow, MCP tools, agent coordination, message handling. 
- - `[requirements]` - Requirements clarity, acceptance criteria, test coverage needs. - - `[code]` - Test patterns, E2E conventions, test organization guidance. - - Examples: - - `[system] CompleteWork returned errors until title was less than 100 characters - consider adding format description`. - - `[requirements] Test description mentioned "admin user" but unclear if TenantAdmin or WorkspaceAdmin`. - - `[code] No existing examples found for testing multi-session scenarios in this context`. - - You can provide multiple categorized items. Use report_problem for urgent system bugs during work. - -**Call MCP CompleteWork tool**: -- `mode`: "task" -- `agentType`: qa-engineer -- `taskSummary`: Objective technical description of what was implemented (imperative mood, sentence case). Examples: "Add E2E tests for user role management", "Implement smoke tests for tenant settings", "Fix flaky tests in authentication flow". NEVER use subjective evaluations like "Excellent tests" or "Clean code". 
-- `responseContent`: Your full response in markdown -- `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes as described above - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork - ---- - -## Key Principles - -- **Tests must pass**: Never complete without running tests and verifying they pass -- **Database migrations**: Always run the run tool if backend schema changed -- **Speed is critical**: Structure tests to minimize steps while maximizing coverage -- **Follow conventions**: Adhere to patterns in [End-to-End Tests](/.windsurf/rules/end-to-end-tests/end-to-end-tests.md) -- **Realistic user journeys**: Test scenarios that reflect actual user behavior diff --git a/.windsurf/workflows/process/implement-feature.md b/.windsurf/workflows/process/implement-feature.md deleted file mode 100644 index 2f35e5f82b..0000000000 --- a/.windsurf/workflows/process/implement-feature.md +++ /dev/null @@ -1,390 +0,0 @@ ---- -description: Orchestrate implementation of a feature through task-level delegation to engineer subagents -auto_execution_mode: 3 ---- - -# Orchestrate Feature Implementation - -[FeatureId] (optional): $ARGUMENTS - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode*. - -- **Agentic mode**: You run autonomously without human supervision - work with your team to find solutions. The [FeatureId] may be provided as argument, or you ask the user which feature to implement. -- **Standalone mode**: The user guides you interactively. Ask questions and collaborate with the user throughout the feature implementation. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.windsurf/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Select feature to implement**: - - **If [FeatureId] provided as argument:** Use the provided [FeatureId]. - - **If NO [FeatureId] provided:** - - **CRITICAL: DO NOT guess or automatically lookup features. ONLY ask the user.** - - - Ask user: "Which feature would you like to implement? (Provide feature ID, or I can list available features if you'd like)" - - Wait for user response - - **ONLY if user explicitly requests a list**, query [PRODUCT_MANAGEMENT_TOOL] for: - - Recently created features (last 48 hours) - - All features in [Planned] status - - Show: Feature ID, name, description (first line), created date - - User provides feature ID (e.g., "proj_abc123" or "PP-100") - - Validate feature exists in [PRODUCT_MANAGEMENT_TOOL] - - If not found, ask user again or offer to list features - -3. **Load [feature] and [task] data** from `[PRODUCT_MANAGEMENT_TOOL]` using the selected/provided [FeatureId]. - -4. **Automatically determine if parallel execution is appropriate**: - - Read the PRD and look for indicators that [tasks] are designed for parallel work: - - PRD mentions "parallel" or "simultaneously" in Tasks section - - [Task] descriptions mention "can work in parallel with" or "independent" - - [Task] descriptions mention "mocked dependencies" or "mocks" - - [Tasks] are explicitly structured to suggest parallel execution - - **Decision:** - - **If parallel indicators found**: Use Parallel Mode (inform user: "Detected parallel-optimized [tasks]") - - **Otherwise**: Use Sequential Mode (default, safer—inform user: "Using sequential execution") - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Load all [tasks] from the [feature]", "status": "pending", "activeForm": "Loading tasks"}, - {"content": "Update [feature] status to [Active]", "status": "pending", "activeForm": "Updating feature status"}, - {"content": "Delegate [tasks] to engineers and track completion", "status": "pending", "activeForm": "Delegating tasks"}, - {"content": "Update [feature] status to [Resolved]", "status": "pending", "activeForm": "Updating feature status to Resolved"} - ] -} -``` - -**Note**: After creating this base todo, you'll replace "Delegate [tasks] to engineers" with actual [task] items from the [feature] (see Step 2 below). - ---- - -## Your Role: Task-Level Coordination - -**You delegate tasks to engineers** - -Your job as Coordinator: -- Load ALL [tasks] from the [feature] -- Create todo list with ALL [tasks] -- Delegate [tasks] to engineer proxy agents -- Engineer proxy agents are pure passthroughs—they just forward your request to workers -- Track progress and mark [tasks] complete -- Don't change code, commit, or use `developer_cli` MCP tools yourself - -## Execution Modes - -### Sequential Mode (Default) - -Delegate one [task] completely before starting the next: - -1. Delegate [task] 1 from [feature] → Wait for completion -2. Delegate [task] 2 from [feature] → Wait for completion -3. Continue until all [tasks] in [feature] complete - -### Parallel Mode - -[tasks] must be implemented in the order they appear in [PRODUCT_MANAGEMENT_TOOL]. Don't skip [tasks]. Within that constraint, you can run independent [tasks] in parallel. - -**Example**: Backend [task] + Frontend [task] simultaneously (if independent) - -**BEFORE delegating in parallel, evaluate dependencies**: - -1. 
**Check engineer type conflicts**: Can't run two tasks with same engineer type (same worker) in parallel - - ❌ WRONG: Two backend tasks simultaneously - - ✅ CORRECT: Backend task + Frontend task simultaneously - -2. **Check functional dependencies**: Can't run dependent work in parallel - - ❌ WRONG: Frontend task that requires backend API being built in that same parallel round - - ❌ WRONG: E2E tests for features being implemented in that same parallel round - - ✅ CORRECT: Independent backend and frontend tasks - - ✅ CORRECT: Backend APIs + E2E tests for existing features - -**If dependencies exist OR same engineer type needed**: Use Sequential mode instead - -**If tasks are independent AND use different engineer types**: Delegate in parallel - -**Example** (parallel independent tasks): -``` -In a SINGLE message, delegate multiple tasks: -1. backend-engineer: Feature: {featureId}, Task: {task1Id} - "Backend for user CRUD operations" -2. frontend-engineer: Feature: {featureId}, Task: {task2Id} - "Frontend UI skeleton for user management" - -Wait for both to complete, then delegate next round (sequential): -3. frontend-engineer: Feature: {featureId}, Task: {task3Id} - "Connect frontend to backend" - -Then continue with next parallel round if more independent tasks exist. 
-``` - -If you're unsure about dependencies, use Sequential mode (safer default) - -## Mandatory Workflow - -**Note:** If you receive MCP errors about agents not running, inform the user to start the required agents (backend-engineer, frontend-engineer, qa-engineer) in separate terminals - -### Step 1: Load Tasks - -Load all [tasks] from the [feature] loaded in Mandatory Preparation - -Refer to `/.windsurf/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` for tool-specific instructions on how to: -- Query for [tasks] within the [feature] -- Extract [task] titles and IDs -- Determine [task] ordering - -### Step 2: Create Todo List - -Use TodoWrite to create todo list with ALL [tasks]: - -``` -1. Backend for user CRUD operations [pending] -2. Frontend UI skeleton for user management [pending] -3. Connect frontend to backend [pending] -4. End-to-end tests for user management [pending] -``` - -Ensure you have confirmed [taskId] values for all [tasks] before proceeding - -### Step 3: Delegate Tasks - -**Sequential Mode (default)**: - -**0. Update [feature] status to [Active]** in [PRODUCT_MANAGEMENT_TOOL] (once at start) - -FOR EACH [task]: - **1. Mark [task] [in_progress]** in todo - - **2. Determine resetMemory value**: - - First delegation of a [task]: `resetMemory=true` (start fresh) - - Re-delegation for follow-up/fix: `resetMemory=false` (maintain context) - - **3. Delegate to engineer proxy agent**: - - Use Task tool with appropriate engineer subagent: - - Backend [task] → `backend-engineer` subagent - - Frontend [task] → `frontend-engineer` subagent - - E2E test [task] → `qa-engineer` subagent - - **Delegation format** (include all parameters in the prompt): - ``` - Feature: {featureId} ({featureTitle}) - Task: {taskId} ({taskTitle}) - Branch: {currentBranch} - Reset memory: true - - Please implement this [task]. - ``` - - The proxy agent will parse this and call the MCP start_worker_agent tool with these parameters - - **4. 
Wait for engineer proxy to complete**: - - Engineer proxy passes your exact request to worker - - Worker implements, gets reviewed, commits - - Engineer proxy returns response - - **5. Verify [task] completion**: - - Check if response contains "✅ Task {taskId} completed successfully!" - - - **If SUCCESS marker found**: - - Verify code was committed by checking recent commits - - Verify [task] marked [Completed] in [PRODUCT_MANAGEMENT_TOOL] - - **If backend [task]**: Restart Aspire AppHost using the run MCP tool to apply database migrations and backend changes - - **If anything unexpected (multiple [tasks] done, uncommitted code, failing tests, etc.)**: - - Zero tolerance - system started clean, any warnings or errors means we broke it and must be fixed before continuing (follow the Boy Scout rule) - - Stop immediately, diagnose the problem, and make a plan to get back on track - - Delegate fixes to engineers - don't fix anything yourself - - **If you need to re-delegate to the same engineer for follow-up**: Use resetMemory=false to maintain context - - In edge cases, revert commits and reset [PRODUCT_MANAGEMENT_TOOL] state to start over - - Mark [task] [completed] in todo - - Move to next [task] - - **If NO success marker found ([task] FAILED)**: - - Change [task] status to [Planned] in [PRODUCT_MANAGEMENT_TOOL] - - Check git status for uncommitted changes - - If uncommitted code exists: Stash with descriptive name (e.g., "{taskId}-failed-{sanitized-task-title}-{timestamp}") - - Attempt to find alternative solution if possible - - If [task] is blocking: Ask user for guidance - - If [task] is non-blocking: Continue with other [tasks] - - **6. Move to next [task]** - -**Parallel Mode** (only when parallel-optimized [tasks] were detected in Mandatory Preparation): - -Work on multiple [tasks] in parallel (each [task] uses a different engineer type). In each round, delegate independent [tasks] simultaneously, wait for all to return, then move to the next round. 
- -**Delegation format for parallel mode** (include all parameters in the prompt): -``` -Feature: {featureId} ({featureTitle}) -Task: {taskId} ({taskTitle}) -Branch: {currentBranch} -Reset memory: true - -⚠️ Parallel Work: You are working in parallel with {other-engineer} on {other-task-title}. You may see their git commits. If you encounter errors that seem related to their changes, sleep 5-10 minutes and re-test. - -Please implement this [task]. -``` - -The proxy agent will parse this and call the MCP start_worker_agent tool with these parameters - -FOR EACH round of parallel delegation: - In a SINGLE message, delegate multiple [tasks] using Task tool multiple times - - Wait for ALL Task tool calls to return - - Verify each [task]: - - Check if response contains "✅ Task {taskId} completed successfully!" - - If success marker found: - - Verify code was committed by checking recent commits - - Verify [task] marked [Completed] in [PRODUCT_MANAGEMENT_TOOL] - - **If backend [task]**: Restart Aspire AppHost using the run MCP tool to apply database migrations and backend changes - - **If anything unexpected (multiple [tasks] done, uncommitted code, failing tests, etc.)**: - - Zero tolerance - system started clean, any warnings or errors means we broke it and must be fixed before continuing (follow the Boy Scout rule) - - Stop immediately, diagnose the problem, and make a plan to get back on track - - Delegate fixes to engineers - don't fix anything yourself - - **If you need to re-delegate to the same engineer for follow-up**: Use resetMemory=false to maintain context - - In edge cases, revert commits and reset [PRODUCT_MANAGEMENT_TOOL] state to start over - - Mark [task] [completed] in todo - - If no success marker found: - - Change [task] status to [Planned] in [PRODUCT_MANAGEMENT_TOOL] - - Check git status for uncommitted changes - - If uncommitted code exists: Stash with descriptive name (e.g., "{taskId}-failed-{sanitized-task-title}-{timestamp}") - - Attempt 
alternative solution if possible - - If [task] is blocking: Ask user for guidance - - If [task] is non-blocking: Continue with other [tasks] - - Continue with next round of parallel [tasks] - -### Step 4: Update Feature Status - -After ALL [tasks] are completed: - -1. **Verify all [tasks] genuinely [completed]**: - - Check that ALL [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are marked [completed] - - **If any [task] is NOT [completed]**: - - Evaluate if there are alternative approaches to complete the [tasks] - - If no alternatives exist: Inform user about incomplete [tasks] and ask for guidance - - DO NOT proceed with [feature] status update - -2. **If all [tasks] are [completed], update [feature] status to [Resolved]** in [PRODUCT_MANAGEMENT_TOOL]: - - All [tasks] are [completed] - - [Feature] implementation is complete - - Status signals completion of implementation phase (not deployed yet) - -### Step 5: Finish When Complete - -Stop ONLY when: -- ALL [tasks] are [completed] in todo -- ALL [tasks] have been delegated and [completed] -- [Feature] status is [Resolved] - -## Rules - -**Don't**: -- Stop before completion—continue until everything is done -- Change code or commit yourself -- Use `developer_cli` MCP tool directly -- Use Parallel mode unless parallel-optimized [tasks] were detected in Mandatory Preparation -- Delegate multiple [tasks] to same engineer type in parallel - -**Do**: -- Use Task tool with subagent_type to delegate [tasks] -- Load all [tasks] from [feature] -- Create simple todo list with [tasks] -- Use Sequential mode by default -- In parallel mode, ensure each [task] in a round uses different engineer type -- Use resetMemory=true for first delegation, resetMemory=false for follow-ups on same task - -## Engineer Proxy Agent Responsibilities - -Engineer proxy agents (backend-engineer, frontend-engineer, qa-engineer) are PURE PASSTHROUGHS: -- They receive your delegation message -- They pass it VERBATIM to the worker via MCP -- They wait for worker to complete 
(implement + review + commit) -- They return worker's response to you - -**Engineer proxies do NOT**: -- Load data -- Make decisions -- Coordinate anything - -**You handle ALL coordination**—loading data, tracking [tasks], managing todo - -## Examples - -**Sequential Mode**: -``` -1. Load [feature] and all 3 [tasks] -2. Create todo with 3 [tasks] -3. Update [Feature] status to [Active] in [PRODUCT_MANAGEMENT_TOOL] -4. Delegate using Task tool (backend-engineer) with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-001 (Backend for user CRUD operations) - Branch: feature/user-management - Reset memory: true - - Please implement this [task]." -5. Wait (proxy forwards to worker, worker implements+reviews+commits, proxy returns) -6. Verify response has "✅ Task completed successfully!" → Mark [task] [completed] -7. Delegate using Task tool (frontend-engineer) with similar prompt format -8. Wait, verify, and mark complete -9. Delegate using Task tool (qa-engineer) with similar prompt format -10. Wait, verify, and mark complete -11. Verify all [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are [completed] -12. Update [Feature] status to [Resolved] in [PRODUCT_MANAGEMENT_TOOL] -13. Done! -``` - -**Parallel Mode**: -``` -1. Load [feature] and all 4 [tasks] -2. Create todo with 4 [tasks] -3. Update [Feature] status to [Active] in [PRODUCT_MANAGEMENT_TOOL] -4. Identify [tasks] that can run in parallel: - - Round 1: Frontend UI skeleton (frontend) + Backend CRUD (backend) - parallel - - Round 2: Connect frontend to backend (frontend) - sequential after round 1 - - Round 3: E2E tests (qa) - sequential after round 2 -5. 
In SINGLE message, delegate both [tasks] in Round 1 using Task tool: - - Task tool → frontend-engineer with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-002 (Frontend UI skeleton for user management) - Branch: feature/user-management - Reset memory: true - - ⚠️ Parallel Work: You are working in parallel with backend-engineer on Backend CRUD. You may see their commits. - - Please implement this [task]." - - Task tool → backend-engineer with prompt: - "Feature: feature-id-123 (User management) - Task: task-id-001 (Backend for user CRUD operations) - Branch: feature/user-management - Reset memory: true - - ⚠️ Parallel Work: You are working in parallel with frontend-engineer on Frontend UI skeleton. You may see their commits. - - Please implement this [task]." - -6. Wait for BOTH to complete -7. Verify each response has "✅ Task completed successfully!" → Mark both [tasks] [completed] -8. Delegate Task tool (frontend-engineer) with prompt including Feature/Task/Title/Branch -9. Wait, verify, mark complete -10. Delegate Task tool (qa-engineer) with prompt including Feature/Task/Title/Branch -11. Wait, verify, mark complete -12. Verify all [tasks] in todo AND [PRODUCT_MANAGEMENT_TOOL] are [completed] -13. Update [Feature] status to [Resolved] in [PRODUCT_MANAGEMENT_TOOL] -14. Done! 
- -``` - -## Remember - -- You delegate entire [tasks] (large scope—complete vertical slices) -- Engineer proxies are passthroughs, not coordinators -- You manage the todo list, not the proxies -- Your job: Load [tasks] from [feature], create todo, delegate [tasks], track completion -- Sequential is default—use parallel only when parallel-optimized [tasks] are detected -- Use resetMemory=true for first delegation of each [task], resetMemory=false for re-delegations diff --git a/.windsurf/workflows/process/implement-task.md b/.windsurf/workflows/process/implement-task.md deleted file mode 100644 index e47efa70b4..0000000000 --- a/.windsurf/workflows/process/implement-task.md +++ /dev/null @@ -1,375 +0,0 @@ ---- -description: Implement a specific [task] from a [feature] following the systematic workflow -auto_execution_mode: 3 ---- - -# Implement Task Workflow - -You are implementing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The [taskId] comes from `current-task.json`, not from command arguments. The CLI passes only the [taskTitle] as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Task details are passed as command arguments `{{{title}}}`. If a [taskId] is provided, read [feature] and [task] from `[PRODUCT_MANAGEMENT_TOOL]`. If no [taskId] provided, ask user to describe the task. There is no `current-task.json`. - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.windsurf/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. 
**Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task you're implementing, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - - **If current-task.json does NOT exist:** - - This means there is no active task assignment. Call CompleteWork immediately to terminate your session: - - ``` - Call CompleteWork with: - - mode: "task" - - agentType: your agent type - - taskSummary: "No active task assignment found" - - responseContent: "Session invoked without active task. Current-task.json does not exist. Terminating session." - - feedback: "[system] Session was invoked with /process:implement-task but no current-task.json exists - possible double invocation after completion" - ``` - - DO NOT proceed with any other work. DO NOT just say "nothing to do". Call CompleteWork immediately to terminate the session. - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Verify Previous Work Committed**: - - Before proceeding, verify your previous task was committed: - 1. Run `git log --oneline -5` to check recent commits. - 2. Look for commits containing your agent type (e.g., "backend-engineer", "frontend-engineer"). - 3. If your previous task is uncommitted: **REFUSE to start** and respond with error explaining uncommitted work exists. - 4. Note: Changes from other engineers (parallel work) are expected and fine - only verify YOUR previous work is committed. - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active]", "status": "pending", "activeForm": "Reading task and updating status to Active"}, - {"content": "Understand the full feature context", "status": "pending", "activeForm": "Understanding feature context"}, - {"content": "Research existing patterns for this [task] type", "status": "pending", "activeForm": "Researching existing patterns"}, - {"content": "Implement each subtask", "status": "pending", "activeForm": "Implementing subtasks"}, - {"content": "Build and verify translations (frontend-engineer only)", "status": "pending", "activeForm": "Building and verifying translations"}, - {"content": "Run validation tools and fix all failures/warnings", "status": "pending", "activeForm": "Running validation tools"}, - {"content": "Test in browser with zero tolerance (frontend-engineer only)", "status": "pending", "activeForm": "Testing in browser"}, - {"content": "Fix any bugs discovered during validation/testing", "status": "pending", "activeForm": "Fixing bugs discovered"}, - {"content": "Update [task] status to [Review] and delegate to reviewer subagent (skip in standalone mode)", "status": "pending", "activeForm": "Updating status and calling reviewer"}, - {"content": "Check feature progress (skip in standalone mode/optional in agentic mode)", "status": "pending", "activeForm": "Checking feature progress"}, - {"content": "MANDATORY: Call CompleteWork after reviewer approval (skip in standalone mode)", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - -**After creating this template**: Remove todo items marked for a different engineer role. For example, if you're a backend-engineer, remove items containing "(frontend-engineer only)". 
- ---- - -## Workflow Steps - -**STEP 1**: Read [task] from [PRODUCT_MANAGEMENT_TOOL] and update status to [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -1. Read [feature] from `featureId` in [PRODUCT_MANAGEMENT_TOOL] to understand the full PRD context -2. Read [task] from `taskId` in [PRODUCT_MANAGEMENT_TOOL] to get task details and subtask bullets -3. **Update [task] status to [Active]** in `[PRODUCT_MANAGEMENT_TOOL]` -4. **If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and call CompleteWork explaining the task could not be found. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] operations -- Still follow full engineer → reviewer → commit cycle - -**After reading [task], unfold subtasks in todo:** - -1. Extract the subtask bullets from [task] description. -2. Replace the "Implement each subtask" todo item with: - - The task name as a parent item. - - Each subtask as an indented child item (using ├─ and └─ formatting). - -**Example:** -If task with title "Backend for user CRUD operations" has subtasks: -``` -- Create UserId strongly typed ID -- Create User aggregate -- Create IUserRepository interface and implementation -- Create API endpoint for create user -``` - -Replace the single "Implement each subtask" item with: -``` -Backend for user CRUD operations -├─ Create UserId strongly typed ID [pending] -├─ Create User aggregate [pending] -├─ Create IUserRepository interface and implementation [pending] -└─ Create API endpoint for create user [pending] -``` - -**STEP 2**: Understand the full feature context - -Before implementing, understand the big picture: - -1. **Read the [feature] from `featureId`** in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem being solved and how the proposed solution will solve it. - - Read the full PRD to understand business context. - -2. 
**Read ALL [task] titles** (not full descriptions) in the [feature] (if not ad-hoc): - - See the planned approach and implementation sequence. - - Understand what you're building in context of the [feature]. - -3. **Read YOUR [task] description carefully**: - - Already read in STEP 1, but review the subtask bullets. - - Tasks are complete vertical slices. - - Subtasks are already unfolded in your todo list (see STEP 1 above). - -The [feature] plan was AI-generated by tech-lead in a few minutes after interviewing the user. You have implementation time to consider the code carefully. You are the expert closest to the code. If something doesn't align with: -- Feature intent. -- Rules in the project. -- Patterns used in the solution. -- Architectural patterns. -- Best practices. -- Simpler approaches. - -**Question it.** Use report_problem or comment on the [task]. Better ideas from implementation phase should surface. - -**Collaborate with your team**: For complex problems or architectural decisions, engage in conversation with team members (use ad-hoc delegation to discuss with other engineers). Better solutions often emerge from team collaboration. - -**Note**: All architectural rules for your role are embedded in your system prompt and available for reference at all times. - -**STEP 3**: Research existing patterns for this [task] type - -Research the codebase to find similar implementations. Look for existing code that handles similar features, patterns, or business logic that can guide your implementation. - -**STEP 4**: Implement each subtask - -**Incremental development approach:** - -Since [tasks] are complete vertical slices, build and test incrementally as you work through each subtask. This prevents accumulating errors and makes debugging easier. - -**For EACH subtask in your todo:** - -1. **Mark subtask [in_progress]** in todo. -2. **Implement the subtask**. -3. 
**Build immediately**: - - Backend: `execute_command(command: "build", backend: true, selfContainedSystem: "{self-contained-system}")`. - - Frontend: `execute_command(command: "build", frontend: true, selfContainedSystem: "{self-contained-system}")`. - - Fix any build errors before proceeding. -4. **Test immediately** (backend only): - - `execute_command(command: "test", backend: true, selfContainedSystem: "{self-contained-system}")`. - - Fix any test failures before proceeding. -5. **Mark subtask [completed]** in todo. -6. **Move to next subtask**. - -**Why build/test after each subtask:** -- Catches errors early when context is fresh. -- Prevents error accumulation. -- Makes debugging faster. -- Ensures each piece works before moving on. -- Critical for larger tasks. - -**Do NOT run format/inspect after each subtask** - these are slow and run once at the end in STEP 6. - -**STEP 5**: Build and verify translations (frontend-engineer only) - -1. Run build to extract new translation strings to `*.po` files. -2. Find ALL empty translations: `grep -r 'msgstr ""' */WebApp/shared/translations/locale/*.po`. -3. Translate EVERY empty msgstr found (all languages: da-DK, nl-NL, etc.). -4. Use consistent domain terminology (check existing translations for guidance). - -**STEP 6**: Run validation tools and fix all failures/warnings - -**Zero tolerance for issues**: -- We deploy to production after review - quality is non-negotiable. -- **Boy Scout Rule**: Leave the codebase cleaner than you found it. -- Fix all failures, warnings, or problems anywhere in the system. -- This includes pre-existing issues unrelated to your changes. -- Don't request review with outstanding issues. - -**Inspect findings block merging**: If inspect returns "Issues found", the CI pipeline will fail and the code cannot be merged. The severity level (note/warning/error) is irrelevant - all findings must be fixed before requesting review. - -For **backend [tasks]**: -1. 
Run **inspect** for your self-contained system: `execute_command(command: "inspect", backend: true, selfContainedSystem: "{self-contained-system}")`. -2. Fix ALL failures found (zero tolerance). - -**Note**: Build and test were already run after each subtask in STEP 4. Backend-engineer does NOT run format - the reviewer will handle formatting before commit. - -For **frontend [tasks]**: -1. Run **build** for your self-contained system: `execute_command(command: "build", frontend: true, selfContainedSystem: "{self-contained-system}")`. -2. Run **format** for all self-contained systems: `execute_command(command: "format", frontend: true)`. -3. Run **inspect** for all self-contained systems: `execute_command(command: "inspect", frontend: true)`. -4. Fix ALL failures found (zero tolerance). - -**STEP 7**: Test in browser with zero tolerance (frontend-engineer only) - -**Required for frontend engineers** - -1. **Navigate to https://localhost:9000** and test ALL functionality: - - **Test the COMPLETE happy path** of the new feature from start to finish. - - **Test ALL edge cases**: validation errors, empty states, maximum values, special characters. - - **Test user scenarios**: What would a user actually do with this feature? - - **Take screenshots** and critically examine if everything renders with expected layout and styling. - - Test in **dark mode** and **light mode** (switch theme and verify UI renders correctly). - - Test **localization** (switch language if feature has translatable strings). - - Test **responsive behavior**: mobile size, small browser, large browser (resize and verify layout adapts). - - Verify UI components render correctly (spacing, alignment, colors, borders, fonts). - - Test all user interactions (clicks, forms, dialogs, navigation, keyboard navigation). - - **Document what you tested** in your response (which scenarios, which user flows, which modes tested). - - If website not responding, use **run** MCP tool to restart server. - -2. 
**Test with different user roles** (if applicable): - Test as admin user: `admin@platformplatform.local` / `UNLOCK`. - Test as non-admin user if feature has role-based access. - Verify permissions and access controls work correctly. - -3. **Monitor Network tab** - Fix ALL issues: - **Zero tolerance**: No failed requests, no 4xx/5xx errors. - Check ALL API calls for the new feature execute successfully. - No slow requests without explanation. - Fix ANY network warnings or errors (even if pre-existing per Boy Scout rule). - -4. **Monitor Console tab** - Fix ALL issues: - **Zero tolerance**: No console errors, no warnings. - Fix ANY console errors or warnings (even if pre-existing per Boy Scout rule). - Clear console and verify it stays clean during all interactions. - -5. **Login instructions**: - Username: `admin@platformplatform.local`. - Use `UNLOCK` for verification code (works on localhost only). - If user doesn't exist: Sign up for a new tenant, use `UNLOCK` for verification code. - -**Boy Scout Rule**: Leave the codebase cleaner than you found it. If you see pre-existing console errors or network warnings unrelated to your changes, FIX THEM. Zero tolerance means ZERO - not "only for my changes". - -**STEP 8**: Fix any bugs discovered during validation/testing - -If you discover bugs during testing or validation (API errors, broken functionality, console errors, broken UI, test failures), fix them before requesting review. Don't request review with known bugs. - -**If bug is in existing code (not your changes)**: -1. Stash only your changes: `git stash push -- <your-files>` (don't include changes from other engineers working in parallel). -2. Verify the bug exists on clean code. -3. **Agentic mode**: Fix yourself if within your specialty OR delegate to engineer subagent if outside your specialty (use "ad-hoc" taskId). - **Standalone mode**: Fix it yourself or inform user that the bug requires different expertise. -4. 
Follow STEP 9 to delegate to reviewer and get the fix committed. -5. `git stash pop` to restore your changes and continue. - -**If you see errors that might be from parallel engineer's changes**: -- Check `git log --oneline` to see recent commits and understand what parallel engineer is working on. -- If recent commits exist: Sleep 5 minutes, then re-test (parallel engineer may be fixing it). -- If issue persists after 10-15 minutes: Delegate to that engineer or fix yourself if within specialty. - -**Valid Solutions When Stuck**: -- Fix the bug yourself if it's within your specialty (your role boundaries). -- Delegate to appropriate engineer if bug is outside your specialty (use start_worker_agent with ad-hoc taskId). -- **Revert your changes** if solution is too complex - revert all git changes, fix pre-existing problems first, then re-implement cleanly. - -**STEP 9**: Update [task] status to [Review] and delegate to reviewer subagent (skip in standalone mode) - -**Before calling reviewer (every time, including re-reviews)**: - -**1. Update [task] status to [Review]** in [PRODUCT_MANAGEMENT_TOOL] (if featureId is NOT "ad-hoc"): - - This applies to EVERY review request, not just the first one. - - When reviewer rejects and moves status to [Active], you MUST move it back to [Review] when requesting re-review. - - Skip this only for ad-hoc work (featureId is "ad-hoc"). - -**2. Zero tolerance verification**: Confirm ALL validation tools pass with ZERO failures/warnings. NEVER request review with ANY outstanding issues - we deploy to production after review. - -**3. Identify your changed files**: -- Run `git status --porcelain` to see ALL changed files. -- Identify YOUR files (files you created/modified for THIS task): - - **Backend engineers**: MUST include `*.Api.json` files. These are auto-generated TypeScript types from your C# API endpoints, placed in WebApp/shared/lib/api/ for frontend consumption, but owned by backend. 
- - **Frontend engineers**: MUST exclude `*.Api.json` files (these belong to backend, not you). - Don't forget `.po` translation files. - Exclude files from parallel engineers (different agent types). - If you changed files outside your scope: `git restore <file>` to revert. -- **CRITICAL for backend engineers**: Check `git status` for any `*.Api.json` files and include them in your file list. -- List YOUR files in "Files Changed" section (one per line with status). - -Delegate to reviewer subagent: - -**Delegation format**: -``` -[One short sentence: what you implemented or fixed] - -## Files Changed -- path/to/file1.tsx -- path/to/file2.cs -- path/to/translations.po - -Request: {requestFilePath} -Response: {responseFilePath} - -[If working in parallel: Include parallel work notification from coordinator, e.g., "⚠️ Parallel Work: Frontend-engineer is working in parallel on {task-title}"] -``` - -**MCP call parameters**: -- `agentType`: backend-reviewer, frontend-reviewer, or qa-reviewer -- `taskTitle`: From current-task.json -- `markdownContent`: Your delegation message above -- `branch`: From current-task.json -- `featureId`: From current-task.json -- `taskId`: From current-task.json -- `requestFilePath`: From current-task.json -- `responseFilePath`: From current-task.json - -**Review loop**: -- If reviewer returns NOT APPROVED → Fix issues → Update [task] status to [Review] → Call reviewer subagent again. -- If reviewer returns APPROVED → Check YOUR files (not parallel engineers' files) are committed → Proceed to completion. -- Don't call CompleteWork unless reviewer approved and committed your code. -- Don't commit code yourself - only the reviewer commits. -- If rejected 3+ times with same feedback despite validation tools passing: Report problem with severity: error, then stop. Don't call CompleteWork, don't proceed with work - the user will take over manually. 
- -**STEP 10**: Check feature progress (skip in standalone mode/optional in agentic mode) - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- Optionally check if there are more [tasks] remaining in the [feature]. -- This helps provide context in your completion message. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip (no [feature] to check). - -**STEP 11**: Call CompleteWork after reviewer approval (skip in standalone mode) - -After completing all work and receiving reviewer approval, call the MCP **CompleteWork** tool with `mode: "task"` to signal completion. This tool call will terminate your session. - -CompleteWork requires reviewer approval and committed code. - -Call CompleteWork after reviewer approval, even if this is the last [task] in a [feature]. - -**Before calling CompleteWork**: -1. Ensure all work is complete and all todos are marked as completed. -2. Write a comprehensive response (what you accomplished, notes for Coordinator). -3. Create an objective technical summary in sentence case (like a commit message). -4. Reflect on your experience and write categorized feedback using prefixes: - - `[system]` - Workflow, MCP tools, agent coordination, message handling. - - `[requirements]` - Requirements clarity, acceptance criteria, task description. - - `[code]` - Code patterns, rules, architecture guidance. - - Examples: - - `[system] CompleteWork returned errors until title was less than 100 characters - consider adding format description`. - - `[requirements] Task mentioned Admin but unclear if TenantAdmin or WorkspaceAdmin`. - - `[code] No existing examples found for implementing audit logging in this context`. - - You can provide multiple categorized items. Use report_problem for urgent system bugs during work. 
- -**Call MCP CompleteWork tool**: -- `mode`: "task" -- `agentType`: Your agent type (backend-engineer, frontend-engineer, or qa-engineer) -- `taskSummary`: Objective technical description of what was implemented (imperative mood, sentence case). Examples: "Add user role endpoints with authorization", "Implement user avatar upload", "Fix null reference in payment processor". NEVER use subjective evaluations like "Excellent implementation" or "Clean code". -- `responseContent`: Your full response in markdown -- `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes as described above - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork - ---- - -## REMINDER: Use Exact TodoWrite JSON - -**✅ DO: Copy the JSON from the "Create Todo List" preparation step**. - -**❌ DON'T: Create custom todo format**. diff --git a/.windsurf/workflows/process/review-end-to-end-tests.md b/.windsurf/workflows/process/review-end-to-end-tests.md deleted file mode 100644 index 7b08250fcf..0000000000 --- a/.windsurf/workflows/process/review-end-to-end-tests.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -description: Review end-to-end test implementation for a [task] -auto_execution_mode: 3 ---- - -# Review E2E Tests Workflow - -You are reviewing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The review request comes from `current-task.json`. The CLI passes only the task title as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Test files are passed as command arguments `{{{title}}}`. Read test files from user-provided paths or from `git status`. - -## Review Principles - -**Zero Tolerance for Test Quality**: E2E tests must be perfect. 
ALL tests must pass, ZERO console errors, ZERO network errors, NO sleep statements. There are no exceptions. - -**Evidence-Based Reviews**: Every finding must be backed by rules in `/.windsurf/rules/end-to-end-tests/end-to-end-tests.md` or established patterns in the codebase. - -**Speed is Critical**: Tests must run fast. Reject tests that are unnecessarily slow or create too many small test files. - ---- - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.windsurf/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path (contains engineer's request message) - - `responseFilePath`: Response file path (where you'll write your review outcome) - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task being reviewed, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read all files referenced in the engineer's request** (test files, implementation details, etc.). - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [feature] and [task] to understand requirements", "status": "pending", "activeForm": "Reading feature and task"}, - {"content": "Run feature-specific e2e tests", "status": "pending", "activeForm": "Running feature E2E tests"}, - {"content": "Review test file structure and organization", "status": "pending", "activeForm": "Reviewing test structure"}, - {"content": "Review each test step for correct patterns", "status": "pending", "activeForm": "Reviewing test steps"}, - {"content": "Review test efficiency and speed", "status": "pending", "activeForm": "Reviewing test efficiency"}, - {"content": "Make binary decision (approve or reject)", "status": "pending", "activeForm": "Making decision"}, - {"content": "If approved, run full regression test suite", "status": "pending", "activeForm": "Running full regression tests"}, - {"content": "If approved, commit changes", "status": "pending", "activeForm": "Committing if approved"}, - {"content": "Update [task] status to [Completed] or [Active]", "status": "pending", "activeForm": "Updating task status"}, - {"content": "MANDATORY: Call CompleteWork", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [feature] and [task] to understand requirements - -1. **Read the [feature]** from `featureId` in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem and solution approach. - -2. **Read the [task]** from `taskId` in [PRODUCT_MANAGEMENT_TOOL]: - - Read the task description carefully. - - Understand what tests should cover. - -3. **Read engineer's request** to understand what tests were created. - -**If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and reject the review explaining the task could not be found. - -4. 
**Study E2E rules**: - - Read [End-to-End Tests](/.windsurf/rules/end-to-end-tests/end-to-end-tests.md) - - Ensure engineer followed all patterns - -**STEP 2**: Run feature-specific e2e tests first - -**If tests require backend changes, run the run tool first**: -- Use **run MCP tool** to restart server and run migrations -- The tool starts .NET Aspire at https://localhost:9000 - -**Run feature-specific E2E tests**: -- Use **end-to-end MCP tool** to run tests: `end-to-end(searchTerms=["feature-name"])` -- **ALL tests MUST pass with ZERO failures to approve** -- **Verify ZERO console errors** during test execution -- **Verify ZERO network errors** (no unexpected 4xx/5xx responses) -- If ANY test fails: REJECT -- If ANY console errors: REJECT -- If ANY network errors: REJECT - -**STEP 3**: Review test file structure and organization - -**Critical Check 1 - Test Count:** -- Normally ONE new `@comprehensive` test per feature -- Existing `@smoke` tests should be updated, not duplicated -- For BIG features: Allow both new `@smoke` and new `@comprehensive` -- **Reject if too many small test files created** - -**STEP 4**: Review each test step for correct patterns - -**Critical Check 1 - Step Naming Pattern:** -- **EVERY step MUST follow**: "Do something & verify result" -- ✅ Good: `"Submit login form & verify authentication"` -- ❌ Bad: `"Verify button is visible"` (no action) -- ❌ Bad: `"Test login"` (uses "test" prefix) -- **Reject if steps don't follow pattern** - -**Critical Check 2 - No Sleep Statements:** -- Search for: `waitForTimeout`, `sleep`, `delay`, `setTimeout` -- **Reject if found—no exceptions** -- Playwright auto-waits—sleep is NEVER needed in any scenario -- Demand Playwright await assertions instead: - - Use `toBeVisible()`, `toHaveURL()`, `toContainText()`, etc. 
- - These built-in auto-wait mechanisms handle all timing scenarios - -**STEP 5**: Review test efficiency and speed - -**Critical Check 1 - Leverage Existing Logic:** -- Verify tests use fixtures: `{ page }`, `{ ownerPage }`, `{ adminPage }`, `{ memberPage }` -- Verify tests use helpers: `expectToastMessage`, `expectValidationError`, etc. -- **Reject if tests duplicate existing logic** - -**Critical Check 2 - Speed Optimization:** -- Tests should test MANY things in FEW steps -- Avoid excessive navigation or setup -- Group related scenarios together -- **Reject if tests are unnecessarily slow** - -**STEP 6**: Make binary decision (approve or reject) - -**Aim for perfection, not "good enough".** - -**APPROVED only if ALL criteria met:** -- ✓ All E2E tests passed with zero failures -- ✓ Zero console errors during test execution -- ✓ Zero network errors during test execution -- ✓ No sleep statements found -- ✓ All steps follow "Do something & verify result" pattern -- ✓ Tests use existing fixtures and helpers -- ✓ Tests are efficient and fast - -**Reject if any issue exists—no exceptions. Common rationalizations to avoid:** -- ✗ "Test failed but feature works manually" → Reject, fix test -- ✗ "Console error unrelated to E2E code" → Reject anyway -- ✗ "It's just a warning" → Reject, zero means zero -- ✗ "Previous test run passed" → Reject anyway if current run has issues - -**When rejecting:** Do full review first, then reject with ALL issues listed (avoid multiple rounds). - -**STEP 7**: If approved, run full regression test suite - -**Before committing, run all e2e tests to ensure no regressions:** - -Use **end-to-end MCP tool** WITHOUT searchTerms: `end-to-end()` -- This runs the complete test suite across all browsers -- **ALL tests MUST pass with ZERO failures** -- If ANY test fails: REJECT (do not commit) - -**STEP 8**: Commit changes - -1. Stage test files: `git add <test-file>` for each test file -2. Commit: One line, imperative form, no description, no co-author -3. 
Get hash: `git rev-parse HEAD` - -Don't use `git add -A` or `git add .` - -**STEP 9**: Update [task] status to [Completed] or [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- If APPROVED: Update [task] status to [Completed]. -- If REJECTED: Update [task] status back to [Active]. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] status updates. - -**STEP 10**: Call CompleteWork - -**Call MCP CompleteWork tool**: -- `mode`: "review" -- `agentType`: qa-reviewer -- `commitHash`: Commit hash if approved, null/empty if rejected -- `rejectReason`: Rejection reason if rejected, null/empty if approved -- `responseContent`: Your full review feedback -- `feedback`: Mandatory categorized feedback using prefixes: - - `[system]` — Workflow, MCP tools, agent coordination, message handling - - `[requirements]` — Requirements clarity, acceptance criteria, task description - - `[code]` — Code patterns, rules, architecture guidance - - Examples: `[system] end-to-end MCP tool reported test passed but it actually failed` or `[requirements] Feature requirements didn't specify mobile viewport testing` - -⚠️ Your session terminates IMMEDIATELY after calling CompleteWork. - ---- - -## Rules - -1. **Tests must pass** — Don't approve failing tests -2. **No sleep statements** — Non-negotiable -3. **Follow step pattern** — Every step needs action + verification -4. **One test per feature** — Avoid test proliferation -5. 
**Speed matters** — Reject slow, inefficient tests diff --git a/.windsurf/workflows/process/review-task.md b/.windsurf/workflows/process/review-task.md deleted file mode 100644 index dfc9af4e08..0000000000 --- a/.windsurf/workflows/process/review-task.md +++ /dev/null @@ -1,472 +0,0 @@ ---- -description: Review a specific [task] implementation from a [feature] following the systematic review workflow -auto_execution_mode: 3 ---- - -# Review Task Workflow - -You are reviewing: **{{{title}}}** - -**Agentic vs standalone mode:** Your system prompt will explicitly state if you are in *agentic mode*. Otherwise, assume *standalone mode* and skip steps marked "(skip in standalone mode)". - -- **Agentic mode**: The review request comes from `current-task.json`. The CLI passes only the task title as the slash command argument. You run autonomously without human supervision - work with your team to find solutions. -- **Standalone mode**: Review request is passed as command arguments `{{{title}}}`. Read changed files from `git status` or user-provided list. - -## Review Principles - -**Devil's Advocate Mindset**: Your job is to validate the engineer's work by actively searching for problems. Look for inconsistencies, deviations, and potential issues. - -**Zero Tolerance**: ALL findings must be fixed, regardless of severity. Never dismiss issues as "minor" or "not worth fixing". Every deviation from rules or established patterns must be addressed. - -**Evidence-Based Reviews**: Every finding must be backed by: -1. Explicit rules from `.windsurf/rules/` files, OR -2. Established patterns found elsewhere in the codebase (cite specific file:line examples), OR -3. Well-established ecosystem conventions (e.g., .NET interfaces prefixed with `I`) - -Avoid subjective personal preferences. - -**Line-by-Line Review**: Like GitHub PR reviews - comment ONLY on specific file:line combinations that have issues. NO comments on correct code. NO commentary on what was done well. 
- -**Objective Language**: State facts about rule violations or pattern deviations. Reference specific rules or codebase examples. Avoid subjective evaluations or praise. - -**Concise Communication**: Minimize token usage for the engineer. Focus only on what needs fixing. - ---- - -## STEP 0: Mandatory Preparation - -1. **Read [PRODUCT_MANAGEMENT_TOOL]-specific guide** at `/.windsurf/reference/product-management/[PRODUCT_MANAGEMENT_TOOL].md` to understand terminology, status mapping, ID format, and MCP configuration. - -2. **Read `current-task.json` from `.workspace/agent-workspaces/{branch-name}/{agent-type}/current-task.json`** to get: - - `requestFilePath`: Request file path (contains engineer's request message) - - `responseFilePath`: Response file path (where you'll write your review outcome) - - `featureId`: [FeatureId] (the feature this task belongs to, or "ad-hoc" for ad-hoc work) - - `taskId`: [TaskId] (the task being reviewed, or "ad-hoc-yyyyMMdd-HHmm" for ad-hoc work) - - `taskTitle`: Task title - -3. **Read the request file** from the path in `requestFilePath`. - -4. **Read all files referenced in the engineer's request** (implementation details, changed files, etc.). - -5. 
**Create Todo List** - -**CALL TodoWrite TOOL WITH THIS EXACT JSON - COPY AND PASTE**: - -```json -{ - "todos": [ - {"content": "Read [feature] and [task] to understand requirements", "status": "pending", "activeForm": "Reading feature and task"}, - {"content": "Create checklist of all requirements from [task] description", "status": "pending", "activeForm": "Creating requirements checklist"}, - {"content": "Run validation tools in parallel (format, test, inspect)", "status": "pending", "activeForm": "Running validation tools"}, - {"content": "Verify translations (frontend-reviewer only)", "status": "pending", "activeForm": "Verifying translations"}, - {"content": "Test in browser with zero tolerance (frontend-reviewer only)", "status": "pending", "activeForm": "Testing in browser"}, - {"content": "Review changed files one-by-one", "status": "pending", "activeForm": "Reviewing files"}, - {"content": "Review high-level architecture", "status": "pending", "activeForm": "Reviewing architecture"}, - {"content": "Verify all requirements met with tests", "status": "pending", "activeForm": "Verifying requirements"}, - {"content": "If approved, commit changes (or reject if any issues found)", "status": "pending", "activeForm": "Committing changes or rejecting"}, - {"content": "Update [task] status to [Completed] or [Active]", "status": "pending", "activeForm": "Updating task status"}, - {"content": "MANDATORY: Call CompleteWork", "status": "pending", "activeForm": "Calling CompleteWork"} - ] -} -``` - -**After creating this template**: Remove todo items marked for a different reviewer role. For example, if you're a backend-reviewer, remove items containing "(frontend-reviewer only)". - -**After creating base todo, unfold "Review changed files one-by-one":** - -1. Get list of changed files from engineer's request (NOT from git status). -2. Replace the single "Review changed files" item with individual file review items. -3. Use tree format (├─ and └─). 
- -**Example:** -``` -Review changed files one-by-one -├─ Read and review User.cs [pending] -├─ Read and review UserRepository.cs [pending] -├─ Read and review CreateUserCommand.cs [pending] -└─ Read and review UsersEndpoint.cs [pending] -``` - ---- - -## Workflow Steps - -**STEP 1**: Read [feature] and [task] to understand requirements - -1. **Read the [feature]** from `featureId` in [PRODUCT_MANAGEMENT_TOOL] (if not ad-hoc): - - Understand the overall problem and solution approach. - -2. **Read the [task]** from `taskId` in [PRODUCT_MANAGEMENT_TOOL]: - - Read the task description carefully. - - Note all subtask bullets (implementation steps). - -3. **Read engineer's request and response files** to understand what was actually implemented. - -**If [task] lookup fails** (not found, already completed, or error): This is a coordination error. Report a problem and reject the review explaining the task could not be found. - -**STEP 2**: Create checklist of all requirements from [task] description - -Extract ALL business rules, edge cases, and validations from task description: - - What are the business rules? (uniqueness, permissions, constraints). - - What validations are required? - - What edge cases must be handled? - - What should NOT be allowed? - - What are the tenant isolation requirements? - -**Example requirements checklist (focus on details, not obvious structure):** -``` -Business rules and validations: -- [ ] Email must be unique within tenant (not globally). -- [ ] Email validation (valid format). -- [ ] Only Tenant Owners can create users. -- [ ] Full name max length ≤ 100 characters. -- [ ] Cannot delete last Owner in tenant. -- [ ] Soft delete (not hard delete). -- [ ] Tenant isolation (users scoped to tenant). -- [ ] Max 3 tenant owners on a tenant. -... - -Edge cases and error handling: -- [ ] Test duplicate email rejection. -- [ ] Test invalid email format. -- [ ] Test non-owner attempting create (403 Forbidden). 
-- [ ] Test deleting last owner (should fail). -- [ ] Test name > 100 chars validation. -- [ ] Test creating user in different tenant (isolation). -... -``` - -This checklist focuses on non-obvious requirements that reviewers often miss. - -4. **Read engineer's request and response files** to understand what was actually implemented. - -The [feature] plan was AI-generated by tech-lead in a few minutes after interviewing the user. Engineers spend implementation time considering the code carefully. You are the expert reviewer. If implementation or task design doesn't align with: -- Feature intent. -- Rules in the project. -- Patterns used in the solution. -- Architectural patterns. -- Best practices. -- Simpler approaches. - -**Reject and provide guidance.** Better ideas from review phase should surface. - -**Collaborate with your team**: For complex problems or design questions, engage in conversation with engineers or other reviewers. Better solutions often emerge from team collaboration. - -**STEP 3**: Run validation tools - -**Zero tolerance for issues**: -- We deploy to production after review - quality is non-negotiable. -- **Boy Scout Rule**: The codebase must be cleaner than before. -- Reject if any failures, warnings, or problems exist anywhere in the system. -- This includes pre-existing issues unrelated to engineer's changes. -- Don't approve code with outstanding issues. -- Infrastructure failures (MCP errors, tools fail) → Reject, report problem, do not approve. - -**Inspect findings block merging**: If inspect returns "Issues found", the CI pipeline will fail and the code cannot be merged. The severity level (note/warning/error) is irrelevant - all findings must be fixed before approval. - -**For backend-reviewer** (validates all self-contained systems to catch cross-self-contained-system breakage): - -1. Run **build**, **format**, **test**, **inspect** following the global tool execution instructions. - -2. 
Handle validation results: - - **If NO parallel work notification in request**: REJECT if ANY failures found (zero tolerance). - - **If parallel work notification present** (e.g., "⚠️ Parallel Work: Frontend-engineer..."): - - REJECT if backend failures found (Core/, Api/, Tests/, Database/). - - IGNORE frontend failures (WebApp/) unless caused by backend API contract changes. - - If frontend failures seem related to backend API changes: Note in rejection that frontend-engineer may need to adapt. - -**For frontend-reviewer** (validates frontend only): - -1. Run **build**, **format**, **inspect** for frontend following the global tool execution instructions. - -2. Handle validation results: - - **If NO parallel work notification in request**: REJECT if ANY failures found (zero tolerance). - - **If parallel work notification present** (e.g., "⚠️ Parallel Work: Backend-engineer..."): - - REJECT if frontend failures found (WebApp/). - - IGNORE backend failures (Core/, Api/, Tests/) unless caused by frontend breaking the API contract. - - If backend failures seem related to API integration: Note in rejection. - -**For qa-reviewer** (validates E2E tests): - -1. Run **build** for frontend, then run **end-to-end** tests following the global tool execution instructions. - -2. REJECT if ANY failures found (zero tolerance). - -**If validation fails with errors unrelated to engineer's changes**: -- Check `git log --oneline` for recent parallel engineer commits. -- If recent commits exist: Sleep 5 minutes, re-run validation. -- If issue persists: REJECT. Per Boy Scout Rule, the engineer is responsible for fixing ALL issues found, even pre-existing ones. - -**Note**: All architectural rules for your role are embedded in your system prompt and available for reference at all times. - -**STEP 4**: Verify translations (frontend-reviewer only) - -Check all `*.po` files for empty `msgstr ""` entries and inconsistent domain terminology. 
Reject if translations are missing or terminology differs from established usage elsewhere. - -**STEP 5**: Test in browser with zero tolerance (frontend-reviewer only) - -**Required for frontend reviewers** - -If infrastructure issues prevent testing: Try to recover (use run MCP tool to restart server, retry browser). If recovery fails, complete the rest of your review, then reject with all findings including the infrastructure issue. Report problem for infrastructure failures. - -1. **Navigate to https://localhost:9000** and test ALL functionality: - - **Test the COMPLETE happy path** of the new feature from start to finish. - - **Test ALL edge cases**: validation errors, empty states, maximum values, special characters, boundary conditions. - - **Test user scenarios**: What would a user actually do with this feature? Try to break it. - - **Take screenshots** and critically examine if everything renders with expected layout and styling. - - Test in **dark mode** and **light mode** (switch theme and verify UI renders correctly). - - Test **localization** (switch language if feature has translatable strings). - - Test **responsive behavior**: mobile size, small browser, large browser (resize and verify layout adapts). - - Verify engineer documented what they tested - if not documented, REJECT. - - If website not responding, use **run** MCP tool to restart server. - -2. **Test with different user roles** (CRITICAL): - - Test as admin: `admin@platformplatform.local` / `UNLOCK`. - - **Test as non-admin user** if feature has role-based behavior. - - Verify permissions, access controls, and role-specific UI elements work correctly. - - REJECT if role-based features not tested with appropriate roles. - -3. **Monitor Network tab** - REJECT if ANY issues found: - - **Zero tolerance**: No failed requests, no 4xx/5xx errors. - - Check ALL API calls for the new feature execute successfully. - - No slow requests without explanation. 
- - REJECT if ANY network warnings or errors found (even pre-existing per Boy Scout rule). - - ✗ BAD: "500 error is backend problem" → REJECT ANYWAY. - - ✗ BAD: "Network error unrelated to my changes" → REJECT ANYWAY. - -4. **Monitor Console tab** - REJECT if ANY issues found: - - **Zero tolerance**: No console errors, no warnings. - - REJECT if ANY console errors or warnings found (even pre-existing per Boy Scout rule). - - Clear console and verify it stays clean during all interactions. - - ✗ BAD: "Warning unrelated to my code" → REJECT ANYWAY. - - ✗ BAD: "HMR error, not my problem" → REJECT ANYWAY. - -5. **Analyze screenshots for UI quality** (take screenshots of new UI): - - Check spacing, sizing, alignment, borders match design patterns. - - Verify responsive behavior (resize browser, test mobile viewport). - - Check color contrast, typography, visual hierarchy. - - REJECT if UI elements are misaligned, poorly spaced, or inconsistent. - - AI is bad at visual design - use your human judgment on screenshots. - -6. **Login instructions**: - - Username: `admin@platformplatform.local`. - - Use `UNLOCK` for verification code (works on localhost only). - - If user doesn't exist: Sign up for a new tenant, use `UNLOCK` for verification code. - -If you discover bugs during testing (API errors, broken functionality, console errors, network errors), reject. Zero tolerance means reject on any issue found. - -**Boy Scout Rule**: If you find pre-existing issues unrelated to engineer's changes, REJECT and require engineer to fix them. Zero tolerance means ZERO - not "only for my changes". - -**STEP 6**: Review changed files one-by-one - -**Review files individually, not in bulk:** - -For EACH file in your unfolded todo: -1. **Mark file [in_progress]** in todo. -2. **Read the ENTIRE file** using Read tool. -3. **Review line-by-line** against rules and patterns: - - Does it follow architectural patterns? (check similar files in codebase). 
- - Are there any rule violations or pattern deviations? - - Document findings: cite specific file:line + rule/pattern violated. -4. **Update todo item with result and mark [completed]**: - - If file has issues: Change to "Read and review FileName.cs (Issues found)". - - If file is clean: Change to "Read and review FileName.cs (Approved)". -5. **Move to next file**. - -**Example todo progression:** -``` -☒ ├─ Read and review TeamEndpoints.cs (Approved) -☒ ├─ Read and review CreateTeam.cs (Issues found) -☐ ├─ Read and review DeleteTeam.cs -``` - -**Why one-by-one:** -- Ensures thorough review of each file. -- Prevents missing details in bulk reviews. -- Critical for larger tasks. - -Play the devil's advocate, and reject if you find ANY small thing that is objectively not correct. - -**STEP 7**: Review high-level architecture - -After reviewing all individual files, step back and review the overall design: - -1. **Verify the implementation approach** makes sense: - - Are entities/aggregates designed correctly? - - Do commands/queries follow CQRS patterns? - - Are API contracts well-designed? - - Does the UI architecture follow patterns (frontend)? - -2. **Check cross-file consistency**: - - Do all pieces work together correctly? - - Are naming conventions consistent? - - Is the data flow logical? - -3. **Verify it solves the business problem**: - - Does this implementation actually deliver what the [task] requires? - - Are there simpler approaches? - -Play the devil's advocate, and reject if you find ANY small thing that is objectively not correct. - -**Update todo item:** -- Change to "Review high-level architecture (Approved)" or "(Issues found)". -- Mark as [completed]. - -**STEP 8**: Verify all requirements met with tests - -**Go through your requirements checklist from STEP 1 systematically:** - -For EACH business rule: -1. **Find the implementation** - Search the reviewed files for where this rule is enforced. -2. 
**Find the test** - Search test files for test covering this rule. -3. **Verify edge case coverage** - Does the test check boundary conditions, error paths? - -**For EACH validation:** -1. **Verify it exists** - Is the validation implemented? -2. **Verify error message** - Does it return proper error response? -3. **Verify test coverage** - Is there a test proving it rejects invalid input? - -**For EACH permission check:** -1. **Verify guard exists** - Is permission checked in command/endpoint? -2. **Verify correct roles** - Does it check the right role (Owner, Admin, Member)? -3. **Verify test coverage** - Is there a test proving unauthorized access is rejected (403)? - -If any requirement is missing, not implemented correctly, or not tested, reject with specific gaps. - -**Example verification:** -``` -Requirements verification: -✓ Email unique within tenant - Implemented in User.cs:45, tested in CreateUserTests.cs:120. -✗ Only Owners can create - No permission guard found in CreateUserCommand. -✗ Cannot delete last Owner - Implementation exists in DeleteUserCommand.cs:67 but NO TEST. -✗ Tenant isolation - Tests only check happy path, missing test for cross-tenant access. - -REJECT: Missing permission guard for create. Missing test for last-owner protection. Missing tenant isolation test. -``` - -**Update todo item:** -- Change to "Verify all requirements met with tests (Approved)" or "(Requirements missing)". -- Mark as [completed]. - -**STEP 9**: If approved, commit changes (or reject if any issues found) - -**Aim for perfection, not "good enough".** - -By this point, you've already marked each file, architecture, and requirements as "(Approved)" or "(Issues found)". Now make the final decision: - -**APPROVED only if ALL criteria met:** -- ✓ All validation tools passed (build, format, test, inspect). -- ✓ Browser testing completed successfully (frontend only). -- ✓ Zero console errors or warnings. -- ✓ Zero network errors (no 4xx, no 5xx). 
-- ✓ No skipped mandatory steps for ANY reason. -- ✓ All code follows rules and patterns. -- ✓ Pre-existing issues fixed (Boy Scout Rule). -- ✓ All files marked "(Approved)". -- ✓ Architecture marked "(Approved)". -- ✓ Requirements marked "(Approved)". - -**Reject if any issue exists - no exceptions. Common rationalizations to avoid:** -- ✗ "Backend issue, not frontend problem" → Reject anyway. -- ✗ "Previous review verified it" → Reject anyway. -- ✗ "Validation tools passed" → Not enough if browser has errors. -- ✗ "Infrastructure/MCP issue" → Reject anyway, report problem. -- ✗ "Pre-existing problem" → Reject anyway per Boy Scout Rule. -- ✗ "It's just a warning" → Reject, zero means zero. - -**When rejecting:** Do full review first, then reject with ALL issues listed (avoid multiple rounds). Skip to STEP 9 to update status, then STEP 10 to call CompleteWork. - -**If APPROVED, proceed with commit:** - -1. Identify files to commit from review context: - - Run `git status --porcelain` to see all changed files - - Filter to YOUR scope only: - - **Backend reviewer**: Api/Core/Tests files + `*.Api.json` files (auto-generated, in WebApp folder) - - **Frontend reviewer**: WebApp files + `*.po` files (auto-generated) EXCEPT `*.Api.json` files -2. Stage files: `git add ` for each file -3. Commit: One line, imperative form, no description, no co-author -4. Get hash: `git rev-parse HEAD` - -Don't use `git add -A` or `git add .` - -**STEP 10**: Update [task] status to [Completed] or [Active] - -**If `featureId` is NOT "ad-hoc" (regular task from a feature):** -- If APPROVED: Update [task] status to [Completed]. -- If REJECTED: Update [task] status back to [Active]. - -**If `featureId` is "ad-hoc" (ad-hoc work):** -- Skip [PRODUCT_MANAGEMENT_TOOL] status updates. - -**STEP 11**: Call CompleteWork - -Call MCP **CompleteWork** tool with `mode: "review"` - your session terminates after this call. 
- -**Categorized Feedback Required**: -Use category prefixes for all feedback: -- `[system]` - Workflow, MCP tools, agent coordination, message handling. -- `[requirements]` - Requirements clarity, acceptance criteria, task description. -- `[code]` - Code patterns, rules, architecture guidance. - -Examples: -- `[system] Validation tools reported stale results from previous run`. -- `[requirements] Engineer's file list didn't match git status - unclear which files were in scope`. -- `[code] Missing examples for implementing telemetry in this pattern`. - -**For APPROVED reviews**: -- Provide: `mode: "review"`. -- Provide: `commitHash` (from `git rev-parse HEAD` in STEP 8). -- Provide: `rejectReason` as null or empty string. -- Provide: `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes. - -**For REJECTED reviews**: -- Provide: `mode: "review"`. -- Provide: `commitHash` as null or empty string. -- Provide: `rejectReason` (sentence case, imperative mood). -- Provide: `feedback`: Mandatory categorized feedback using [system], [requirements], or [code] prefixes. - ---- - -## Response Format Requirements - -When calling CompleteWork with `responseContent`: - -**For REJECTED reviews**: - -```markdown -[Short objective summary of why rejected - 1-2 sentences or short paragraph if more elaboration needed] - -## Issues - -### File.cs:Line -[Objective description of problem] -- **Rule/Pattern**: [Reference to .windsurf/rules/X.md or pattern from codebase] -- **Fix**: [Optional: Suggest specific change] - -### AnotherFile.cs:Line -[Objective description of problem] -- **Rule/Pattern**: [Reference] -- **Fix**: [Optional] -``` - -**For APPROVED reviews**: - -```markdown -[One sentence objective explanation of why approved, e.g., "Follows established patterns for X and complies with rules Y and Z"] -``` - -**Requirements**: -- Line-by-line review like GitHub PR. -- NO comments on correct code. 
-- NO subjective language ("excellent", "great", "well done"). -- NO dismissing issues as "minor" or "optional". -- Cite specific rules or codebase patterns. -- Keep responses concise to minimize token usage. - ---- - -## REMINDER: Use Exact TodoWrite JSON - -**✅ DO: Copy JSON from above**. - -**❌ DON'T: Create custom format**. diff --git a/README.md b/README.md index 7bd9e41e54..afff830edb 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ Built to demonstrate seamless flow: backend contracts feed a fully-typed React U * **Backend** - .NET 10 and C# 14 adhering to the principles of vertical slice architecture, DDD, CQRS, and clean code * **Frontend** - React 19, TypeScript, TanStack Router & Query, ShadCN 2.0 with Base UI for accessible UI * **CI/CD** - GitHub actions for fast passwordless deployments of docker containers and infrastructure (Bicep) -* **Infrastructure** - Cost efficient and scalable Azure PaaS services like Azure Container Apps, Azure SQL, etc. +* **Infrastructure** - Cost efficient and scalable Azure PaaS services like Azure Container Apps, Azure PostgreSQL, etc. * **Developer CLI** - Extendable .NET CLI for DevEx - set up CI/CD is one command and a couple of questions * **AI rules** - 30+ rules & workflows for Claude Code - sync to other editors can be enabled via `.gitignore` * **Multi-agent workflow** (Experimental) - Specialized autonomous AI agents expert in PlatformPlatform's architecture @@ -178,7 +178,7 @@ Restart your terminal to make the `pp` command available. ## 3. Run the Aspire AppHost to spin up everything on localhost -Using Aspire, docker images with SQL Server, Blob Storage emulator, and development mail server will be downloaded and started. No need to install anything, or learn complicated commands. Simply run this command, and everything just works 🎉 +Using Aspire, docker images with PostgreSQL, Blob Storage emulator, and development mail server will be downloaded and started. 
No need to install anything, or learn complicated commands. Simply run this command, and everything just works 🎉 With the CLI installed: @@ -447,7 +447,7 @@ PlatformPlatform is a [monorepo](https://en.wikipedia.org/wiki/Monorepo) contain ├─ cloud-infrastructure # Contains Bash and Bicep scripts (IaC) for Azure resources │ ├─ cluster # Scale units like production-west-eu, production-east-us, etc. │ ├─ environment # Shared resources like App Insights, Container Registry, etc. -│ └─ modules # Reusable Bicep modules like Container App, SQL Server, etc. +│ └─ modules # Reusable Bicep modules like Container App, PostgreSQL, etc. └─ developer-cli # A .NET CLI tool for automating common developer tasks ``` @@ -504,7 +504,7 @@ The frontend is built with these technologies: PlatformPlatform's cloud infrastructure is built using the latest Azure Platform as a Service (PaaS) technologies: - [Azure Container Apps](https://learn.microsoft.com/en-us/azure/container-apps/overview) -- [Azure SQL](https://azure.microsoft.com/en-us/products/azure-sql) +- [Azure Database for PostgreSQL](https://azure.microsoft.com/en-us/products/postgresql) - [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs) - [Azure Service Bus](https://azure.microsoft.com/en-us/services/service-bus) - [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault) @@ -531,7 +531,7 @@ PlatformPlatform's cloud infrastructure is built using the latest Azure Platform - **Multi-region**: Spinning up a cluster in a new region is a matter of adding one extra deployment job to the GitHub workflow. This allows customers to select a region where their data is close to the user and local data protection laws like GDPR, CCPA, etc. are followed. - **Azure Container Apps**: The application is hosted using Azure Container Apps, which is a new service from Azure that provides a fully managed Kubernetes environment for running containerized applications. 
You don't need to be a Kubernetes expert to run your application in a scalable and secure environment. - **Scaling from zero to millions of users**: The Azure Container App Environment is configured to scale from zero to millions of users, and the infrastructure is configured to scale automatically based on load. This means the starting costs are very low, and the solution can scale to millions of users without any manual intervention. This enables having Development and Staging environments running with very low costs. -- **Azure SQL**: The database is hosted using Azure SQL Database, which is a fully managed SQL Server instance. SQL Server is known for its high performance, stability, scalability, and security. The server will easily handle millions of users with single-digit millisecond response times. +- **Azure PostgreSQL**: The database is hosted using Azure Database for PostgreSQL Flexible Server, which is a fully managed PostgreSQL database. PostgreSQL is known for its high performance, stability, scalability, and security. The server will easily handle millions of users with single-digit millisecond response times. 
diff --git a/application/AppHost/AppHost.csproj b/application/AppHost/AppHost.csproj index 882b12a41a..e8271cfe86 100644 --- a/application/AppHost/AppHost.csproj +++ b/application/AppHost/AppHost.csproj @@ -22,7 +22,7 @@ - + diff --git a/application/AppHost/Program.cs b/application/AppHost/Program.cs index 98208396a3..dd3959afcb 100644 --- a/application/AppHost/Program.cs +++ b/application/AppHost/Program.cs @@ -19,10 +19,11 @@ var (stripeConfigured, stripePublishableKey, stripeApiKey, stripeWebhookSecret) = ConfigureStripeParameters(); var stripeFullyConfigured = stripeConfigured && builder.Configuration["Parameters:stripe-webhook-secret"] is not null and not "not-configured"; -var sqlPassword = builder.CreateStablePassword("sql-server-password"); -var sqlServer = builder.AddSqlServer("sql-server", sqlPassword, 9002) - .WithDataVolume("platform-platform-sql-server-data") - .WithLifetime(ContainerLifetime.Persistent); +var postgresPassword = builder.CreateStablePassword("postgres-password"); +var postgres = builder.AddPostgres("postgres", password: postgresPassword, port: 9002) + .WithDataVolume("platform-platform-postgres-data") + .WithLifetime(ContainerLifetime.Persistent) + .WithArgs("-c", "wal_level=logical"); var azureStorage = builder .AddAzureStorage("azure-storage") @@ -56,7 +57,7 @@ .AddJavaScriptApp("frontend-build", "../") .WithEnvironment("CERTIFICATE_PASSWORD", certificatePassword); -var accountDatabase = sqlServer +var accountDatabase = postgres .AddDatabase("account-database", "account"); var accountWorkers = builder @@ -80,7 +81,7 @@ .WithEnvironment("Stripe__AllowMockProvider", "true") .WaitFor(accountWorkers); -var backOfficeDatabase = sqlServer +var backOfficeDatabase = postgres .AddDatabase("back-office-database", "back-office"); var backOfficeWorkers = builder @@ -96,7 +97,7 @@ .WithReference(azureStorage) .WaitFor(backOfficeWorkers); -var mainDatabase = sqlServer +var mainDatabase = postgres .AddDatabase("main-database", "main"); var 
mainWorkers = builder diff --git a/application/Directory.Packages.props b/application/Directory.Packages.props index e190029453..0c6ba936a4 100644 --- a/application/Directory.Packages.props +++ b/application/Directory.Packages.props @@ -11,8 +11,8 @@ - - + + @@ -35,8 +35,11 @@ + - + + + all diff --git a/application/README.md b/application/README.md index 26408e39fe..cf91d1fff2 100644 --- a/application/README.md +++ b/application/README.md @@ -12,7 +12,7 @@ The point of PlatformPlatform is not to create a distributed system. Since Platf There are also some shared projects: - `SharedKernel` - a foundation with generic functionalities and boilerplate code that are shared between self-contained systems. This ensures a secure and maintainable codebase. This not only guarantees a consistent architecture but also ensures that all self-contained systems are developed in a uniform manner, making it easy for developers to move between systems and focus on the business logic, rather than the infrastructure. In theory the shared kernel is maintained by the PlatformPlatform team, and there should be no reason for you to make changes to this project. - `AppGateway` - the single entry point for all self-contained systems, responsible for routing requests to the correct system using YARP reverse proxy as BFF (Backend for Frontend). It contains logic for refreshing access tokens, and it will eventually also handle tasks like rate limiting, caching, etc. -- `AppHost` - only used for development, this is an Aspire App Host that orchestrates starting all dependencies like SQL Server, Blob Storage, and Mail Server, and then starts all self-contained systems in a single operation. It's a .NET alternative to Docker Compose. While Aspire can also be used for the deployment of infrastructure, this is not used in PlatformPlatform, as it's not mature for enterprise-grade systems. If your self-contained system needs access to a different service, you can add it to the `AppHost` project. 
+- `AppHost` - only used for development, this is an Aspire App Host that orchestrates starting all dependencies like PostgreSQL, Blob Storage, and Mail Server, and then starts all self-contained systems in a single operation. It's a .NET alternative to Docker Compose. While Aspire can also be used for the deployment of infrastructure, this is not used in PlatformPlatform, as it's not mature for enterprise-grade systems. If your self-contained system needs access to a different service, you can add it to the `AppHost` project. ## Account diff --git a/application/account/Core/Database/DataMigrations/20260211101301_SeedBasisSubscriptions.cs b/application/account/Core/Database/DataMigrations/20260211101301_SeedBasisSubscriptions.cs deleted file mode 100644 index a5b7831d20..0000000000 --- a/application/account/Core/Database/DataMigrations/20260211101301_SeedBasisSubscriptions.cs +++ /dev/null @@ -1,39 +0,0 @@ -using Account.Features.Subscriptions.Domain; -using Microsoft.EntityFrameworkCore; -using SharedKernel.Database; - -namespace Account.Database.DataMigrations; - -public sealed class SeedBasisSubscriptions(AccountDbContext dbContext) : IDataMigration -{ - public string Id => "20260124012001_SeedBasisSubscriptions"; - - public async Task ExecuteAsync(CancellationToken cancellationToken) - { - var tenantIds = await dbContext.Database - .SqlQueryRaw( - """ - SELECT t.Id AS Value - FROM Tenants t - WHERE NOT EXISTS (SELECT 1 FROM Subscriptions s WHERE s.TenantId = t.Id) - """ - ) - .ToListAsync(cancellationToken); - - foreach (var tenantId in tenantIds) - { - var subscriptionId = SubscriptionId.NewId(); - await dbContext.Database.ExecuteSqlAsync( - $""" - INSERT INTO Subscriptions (TenantId, Id, CreatedAt, ModifiedAt, [Plan], ScheduledPlan, StripeCustomerId, StripeSubscriptionId, CurrentPeriodEnd, CancelAtPeriodEnd, FirstPaymentFailedAt, PaymentTransactions, PaymentMethod, BillingInfo) - VALUES ({tenantId}, {subscriptionId.Value}, GETUTCDATE(), NULL, 'Basis', NULL, 
NULL, NULL, NULL, 0, NULL, '[]', NULL, NULL) - """, - cancellationToken - ); - } - - await dbContext.SaveChangesAsync(cancellationToken); - - return $"Created {tenantIds.Count} basis subscriptions for existing tenants"; - } -} diff --git a/application/account/Core/Database/DataMigrations/20260216195100_SetAllTenantsToActive.cs b/application/account/Core/Database/DataMigrations/20260216195100_SetAllTenantsToActive.cs deleted file mode 100644 index 10c5c164a5..0000000000 --- a/application/account/Core/Database/DataMigrations/20260216195100_SetAllTenantsToActive.cs +++ /dev/null @@ -1,21 +0,0 @@ -using Microsoft.EntityFrameworkCore; -using SharedKernel.Database; - -namespace Account.Database.DataMigrations; - -public sealed class SetAllTenantsToActive(AccountDbContext dbContext) : IDataMigration -{ - public string Id => "20260216195100_SetAllTenantsToActive"; - - public async Task ExecuteAsync(CancellationToken cancellationToken) - { - var updatedCount = await dbContext.Database.ExecuteSqlAsync( - $"UPDATE Tenants SET State = 'Active' WHERE State NOT IN ('Active', 'Suspended')", - cancellationToken - ); - - await dbContext.SaveChangesAsync(cancellationToken); - - return $"Updated {updatedCount} tenants to Active state"; - } -} diff --git a/application/account/Core/Database/Migrations/20260125033200_Initial.cs b/application/account/Core/Database/Migrations/20260125033200_Initial.cs deleted file mode 100644 index d344f6de6b..0000000000 --- a/application/account/Core/Database/Migrations/20260125033200_Initial.cs +++ /dev/null @@ -1,124 +0,0 @@ -using Microsoft.EntityFrameworkCore.Infrastructure; -using Microsoft.EntityFrameworkCore.Migrations; - -namespace Account.Database.Migrations; - -[DbContext(typeof(AccountDbContext))] -[Migration("20260125033200_Initial")] -public sealed class Initial : Migration -{ - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.CreateTable( - "Tenants", - table => new - { - Id = table.Column("bigint", 
nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - DeletedAt = table.Column("datetimeoffset", nullable: true), - Name = table.Column("nvarchar(30)", nullable: false), - State = table.Column("varchar(20)", nullable: false), - Logo = table.Column("varchar(150)", nullable: false, defaultValue: "{}") - }, - constraints: table => { table.PrimaryKey("PK_Tenants", x => x.Id); } - ); - - migrationBuilder.CreateTable( - "EmailLogins", - table => new - { - Id = table.Column("varchar(32)", nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - Type = table.Column("varchar(20)", nullable: false), - Email = table.Column("nvarchar(100)", nullable: false), - OneTimePasswordHash = table.Column("char(84)", nullable: false), - RetryCount = table.Column("int", nullable: false), - ResendCount = table.Column("int", nullable: false), - Completed = table.Column("bit", nullable: false) - }, - constraints: table => { table.PrimaryKey("PK_EmailLogins", x => x.Id); } - ); - - migrationBuilder.CreateIndex("IX_EmailLogins_Email", "EmailLogins", "Email"); - - migrationBuilder.CreateTable( - "ExternalLogins", - table => new - { - Id = table.Column("varchar(32)", nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - Type = table.Column("varchar(20)", nullable: false), - ProviderType = table.Column("varchar(20)", nullable: false), - Email = table.Column("varchar(100)", nullable: true), - CodeVerifier = table.Column("char(128)", nullable: false), - Nonce = table.Column("char(43)", nullable: false), - BrowserFingerprint = table.Column("char(64)", nullable: false), - LoginResult = table.Column("varchar(30)", nullable: true) - }, - constraints: table => { table.PrimaryKey("PK_ExternalLogins", x => x.Id); } - ); - - 
migrationBuilder.CreateTable( - "Users", - table => new - { - TenantId = table.Column("bigint", nullable: false), - Id = table.Column("varchar(32)", nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - DeletedAt = table.Column("datetimeoffset", nullable: true), - LastSeenAt = table.Column("datetimeoffset", nullable: true), - Email = table.Column("nvarchar(100)", nullable: false), - ExternalIdentities = table.Column("nvarchar(max)", nullable: false, defaultValue: "[]"), - EmailConfirmed = table.Column("bit", nullable: false), - FirstName = table.Column("nvarchar(30)", nullable: true), - LastName = table.Column("nvarchar(30)", nullable: true), - Title = table.Column("nvarchar(50)", nullable: true), - Role = table.Column("varchar(20)", nullable: false), - Locale = table.Column("varchar(5)", nullable: false), - Avatar = table.Column("varchar(150)", nullable: false) - }, - constraints: table => - { - table.PrimaryKey("PK_Users", x => x.Id); - table.ForeignKey("FK_Users_Tenants_TenantId", x => x.TenantId, "Tenants", "Id"); - } - ); - - migrationBuilder.CreateIndex("IX_Users_TenantId", "Users", "TenantId"); - migrationBuilder.CreateIndex("IX_Users_TenantId_Email", "Users", ["TenantId", "Email"], unique: true, filter: "[DeletedAt] IS NULL"); - - migrationBuilder.CreateTable( - "Sessions", - table => new - { - TenantId = table.Column("bigint", nullable: false), - Id = table.Column("varchar(32)", nullable: false), - UserId = table.Column("varchar(32)", nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - RefreshTokenJti = table.Column("varchar(32)", nullable: false), - PreviousRefreshTokenJti = table.Column("varchar(32)", nullable: true), - RefreshTokenVersion = table.Column("int", nullable: false), - LoginMethod = table.Column("varchar(20)", nullable: false), - DeviceType = 
table.Column("varchar(20)", nullable: false), - UserAgent = table.Column("nvarchar(500)", nullable: false), - IpAddress = table.Column("varchar(45)", nullable: false), - RevokedAt = table.Column("datetimeoffset", nullable: true), - RevokedReason = table.Column("varchar(20)", nullable: true) - }, - constraints: table => - { - table.PrimaryKey("PK_Sessions", x => x.Id); - table.ForeignKey("FK_Sessions_Tenants_TenantId", x => x.TenantId, "Tenants", "Id"); - table.ForeignKey("FK_Sessions_Users_UserId", x => x.UserId, "Users", "Id", onDelete: ReferentialAction.Cascade); - } - ); - - migrationBuilder.CreateIndex("IX_Sessions_TenantId", "Sessions", "TenantId"); - migrationBuilder.CreateIndex("IX_Sessions_UserId", "Sessions", "UserId"); - } -} diff --git a/application/account/Core/Database/Migrations/20260211101300_AddSubscriptionsAndStripeEvents.cs b/application/account/Core/Database/Migrations/20260211101300_AddSubscriptionsAndStripeEvents.cs deleted file mode 100644 index 65da9d267b..0000000000 --- a/application/account/Core/Database/Migrations/20260211101300_AddSubscriptionsAndStripeEvents.cs +++ /dev/null @@ -1,67 +0,0 @@ -using Microsoft.EntityFrameworkCore.Infrastructure; -using Microsoft.EntityFrameworkCore.Migrations; - -namespace Account.Database.Migrations; - -[DbContext(typeof(AccountDbContext))] -[Migration("20260211101300_AddSubscriptionsAndStripeEvents")] -public sealed class AddSubscriptionsAndStripeEvents : Migration -{ - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.AddColumn("SuspensionReason", "Tenants", "varchar(30)", nullable: true); - migrationBuilder.AddColumn("SuspendedAt", "Tenants", "datetimeoffset", nullable: true); - - migrationBuilder.CreateTable( - "Subscriptions", - table => new - { - TenantId = table.Column("bigint", nullable: false), - Id = table.Column("varchar(32)", nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", 
nullable: true), - Plan = table.Column("varchar(20)", nullable: false), - ScheduledPlan = table.Column("varchar(20)", nullable: true), - StripeCustomerId = table.Column("varchar(32)", nullable: true), - StripeSubscriptionId = table.Column("varchar(32)", nullable: true), - CurrentPriceAmount = table.Column("decimal(18,2)", nullable: true), - CurrentPriceCurrency = table.Column("varchar(3)", nullable: true), - CurrentPeriodEnd = table.Column("datetimeoffset", nullable: true), - CancelAtPeriodEnd = table.Column("bit", nullable: false), - FirstPaymentFailedAt = table.Column("datetimeoffset", nullable: true), - CancellationReason = table.Column("varchar(20)", nullable: true), - CancellationFeedback = table.Column("nvarchar(500)", nullable: true), - PaymentTransactions = table.Column("nvarchar(max)", nullable: false), - PaymentMethod = table.Column("nvarchar(max)", nullable: true), - BillingInfo = table.Column("nvarchar(max)", nullable: true) - }, - constraints: table => { table.PrimaryKey("PK_Subscriptions", x => x.Id); } - ); - - migrationBuilder.CreateIndex("IX_Subscriptions_TenantId", "Subscriptions", "TenantId", unique: true); - - migrationBuilder.CreateIndex("IX_Subscriptions_StripeCustomerId", "Subscriptions", "StripeCustomerId", unique: true, filter: "StripeCustomerId IS NOT NULL"); - - migrationBuilder.CreateTable( - "StripeEvents", - table => new - { - TenantId = table.Column("bigint", nullable: true), - Id = table.Column("varchar(32)", nullable: false), - CreatedAt = table.Column("datetimeoffset", nullable: false), - ModifiedAt = table.Column("datetimeoffset", nullable: true), - EventType = table.Column("varchar(50)", nullable: false), - Status = table.Column("varchar(20)", nullable: false), - ProcessedAt = table.Column("datetimeoffset", nullable: true), - StripeCustomerId = table.Column("varchar(32)", nullable: true), - StripeSubscriptionId = table.Column("varchar(32)", nullable: true), - Payload = table.Column("nvarchar(max)", nullable: true), - Error = 
table.Column("nvarchar(500)", nullable: true) - }, - constraints: table => { table.PrimaryKey("PK_StripeEvents", x => x.Id); } - ); - - migrationBuilder.CreateIndex("IX_StripeEvents_TenantId", "StripeEvents", "TenantId"); - migrationBuilder.CreateIndex("IX_StripeEvents_StripeCustomerId_Status", "StripeEvents", ["StripeCustomerId", "Status"]); - } -} diff --git a/application/account/Core/Database/Migrations/20260303023200_Initial.cs b/application/account/Core/Database/Migrations/20260303023200_Initial.cs new file mode 100644 index 0000000000..a91a1eb094 --- /dev/null +++ b/application/account/Core/Database/Migrations/20260303023200_Initial.cs @@ -0,0 +1,195 @@ +using Microsoft.EntityFrameworkCore.Infrastructure; +using Microsoft.EntityFrameworkCore.Migrations; + +namespace Account.Database.Migrations; + +[DbContext(typeof(AccountDbContext))] +[Migration("20260303023200_Initial")] +public sealed class Initial : Migration +{ + protected override void Up(MigrationBuilder migrationBuilder) + { + migrationBuilder.CreateTable( + "__data_migrations_history", + table => new + { + migration_id = table.Column("text", nullable: false), + product_version = table.Column("text", nullable: false), + executed_at = table.Column("timestamptz", nullable: false), + execution_time_ms = table.Column("bigint", nullable: false), + summary = table.Column("text", nullable: false) + }, + constraints: table => { table.PrimaryKey("pk___data_migrations_history", x => x.migration_id); } + ); + + migrationBuilder.CreateTable( + "tenants", + table => new + { + id = table.Column("bigint", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + deleted_at = table.Column("timestamptz", nullable: true), + name = table.Column("text", nullable: false), + state = table.Column("text", nullable: false), + plan = table.Column("text", nullable: false), + suspension_reason = table.Column("text", nullable: true), + 
suspended_at = table.Column("timestamptz", nullable: true), + logo = table.Column("jsonb", nullable: false, defaultValue: "{}") + }, + constraints: table => { table.PrimaryKey("pk_tenants", x => x.id); } + ); + + migrationBuilder.CreateTable( + "email_logins", + table => new + { + id = table.Column("text", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + type = table.Column("text", nullable: false), + email = table.Column("text", nullable: false), + one_time_password_hash = table.Column("text", nullable: false), + retry_count = table.Column("integer", nullable: false), + resend_count = table.Column("integer", nullable: false), + completed = table.Column("boolean", nullable: false) + }, + constraints: table => { table.PrimaryKey("pk_email_logins", x => x.id); } + ); + + migrationBuilder.CreateIndex("ix_email_logins_email", "email_logins", "email"); + + migrationBuilder.CreateTable( + "external_logins", + table => new + { + id = table.Column("text", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + type = table.Column("text", nullable: false), + provider_type = table.Column("text", nullable: false), + email = table.Column("text", nullable: true), + code_verifier = table.Column("text", nullable: false), + nonce = table.Column("text", nullable: false), + browser_fingerprint = table.Column("text", nullable: false), + login_result = table.Column("text", nullable: true) + }, + constraints: table => { table.PrimaryKey("pk_external_logins", x => x.id); } + ); + + migrationBuilder.CreateTable( + "users", + table => new + { + tenant_id = table.Column("bigint", nullable: false), + id = table.Column("text", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + deleted_at = table.Column("timestamptz", nullable: 
true), + last_seen_at = table.Column("timestamptz", nullable: true), + email = table.Column("text", nullable: false), + external_identities = table.Column("jsonb", nullable: false), + email_confirmed = table.Column("boolean", nullable: false), + first_name = table.Column("text", nullable: true), + last_name = table.Column("text", nullable: true), + title = table.Column("text", nullable: true), + role = table.Column("text", nullable: false), + locale = table.Column("text", nullable: false), + avatar = table.Column("jsonb", nullable: false) + }, + constraints: table => + { + table.PrimaryKey("pk_users", x => x.id); + table.ForeignKey("fk_users_tenants_tenant_id", x => x.tenant_id, "tenants", "id"); + } + ); + + migrationBuilder.CreateIndex("ix_users_tenant_id", "users", "tenant_id"); + migrationBuilder.CreateIndex("ix_users_tenant_id_email", "users", ["tenant_id", "email"], unique: true, filter: "deleted_at IS NULL"); + + migrationBuilder.CreateTable( + "sessions", + table => new + { + tenant_id = table.Column("bigint", nullable: false), + id = table.Column("text", nullable: false), + user_id = table.Column("text", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + refresh_token_jti = table.Column("text", nullable: false), + previous_refresh_token_jti = table.Column("text", nullable: true), + refresh_token_version = table.Column("integer", nullable: false), + login_method = table.Column("text", nullable: false), + device_type = table.Column("text", nullable: false), + user_agent = table.Column("text", nullable: false), + ip_address = table.Column("text", nullable: false), + revoked_at = table.Column("timestamptz", nullable: true), + revoked_reason = table.Column("text", nullable: true) + }, + constraints: table => + { + table.PrimaryKey("pk_sessions", x => x.id); + table.ForeignKey("fk_sessions_tenants_tenant_id", x => x.tenant_id, "tenants", "id"); + 
table.ForeignKey("fk_sessions_users_user_id", x => x.user_id, "users", "id", onDelete: ReferentialAction.Cascade); + } + ); + + migrationBuilder.CreateIndex("ix_sessions_tenant_id", "sessions", "tenant_id"); + migrationBuilder.CreateIndex("ix_sessions_user_id", "sessions", "user_id"); + + migrationBuilder.CreateTable( + "subscriptions", + table => new + { + tenant_id = table.Column("bigint", nullable: false), + id = table.Column("text", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + plan = table.Column("text", nullable: false), + scheduled_plan = table.Column("text", nullable: true), + stripe_customer_id = table.Column("text", nullable: true), + stripe_subscription_id = table.Column("text", nullable: true), + current_price_amount = table.Column("numeric(18,2)", nullable: true), + current_price_currency = table.Column("text", nullable: true), + current_period_end = table.Column("timestamptz", nullable: true), + cancel_at_period_end = table.Column("boolean", nullable: false), + first_payment_failed_at = table.Column("timestamptz", nullable: true), + cancellation_reason = table.Column("text", nullable: true), + cancellation_feedback = table.Column("text", nullable: true), + payment_transactions = table.Column("jsonb", nullable: false), + payment_method = table.Column("jsonb", nullable: true), + billing_info = table.Column("jsonb", nullable: true) + }, + constraints: table => + { + table.PrimaryKey("pk_subscriptions", x => x.id); + table.ForeignKey("fk_subscriptions_tenants_tenant_id", x => x.tenant_id, "tenants", "id"); + } + ); + + migrationBuilder.CreateIndex("ix_subscriptions_tenant_id", "subscriptions", "tenant_id", unique: true); + migrationBuilder.CreateIndex("ix_subscriptions_stripe_customer_id", "subscriptions", "stripe_customer_id", unique: true, filter: "stripe_customer_id IS NOT NULL"); + + migrationBuilder.CreateTable( + "stripe_events", + table => new + { + 
tenant_id = table.Column("bigint", nullable: true), + id = table.Column("text", nullable: false), + created_at = table.Column("timestamptz", nullable: false), + modified_at = table.Column("timestamptz", nullable: true), + event_type = table.Column("text", nullable: false), + status = table.Column("text", nullable: false), + processed_at = table.Column("timestamptz", nullable: true), + stripe_customer_id = table.Column("text", nullable: true), + stripe_subscription_id = table.Column("text", nullable: true), + payload = table.Column("jsonb", nullable: true), + error = table.Column("text", nullable: true) + }, + constraints: table => { table.PrimaryKey("pk_stripe_events", x => x.id); } + ); + + migrationBuilder.CreateIndex("ix_stripe_events_tenant_id", "stripe_events", "tenant_id"); + migrationBuilder.CreateIndex("ix_stripe_events_stripe_customer_id_status", "stripe_events", ["stripe_customer_id", "status"]); + } +} diff --git a/application/account/Core/Features/Authentication/Domain/SessionRepository.cs b/application/account/Core/Features/Authentication/Domain/SessionRepository.cs index 01071836bb..79dc40e21d 100644 --- a/application/account/Core/Features/Authentication/Domain/SessionRepository.cs +++ b/application/account/Core/Features/Authentication/Domain/SessionRepository.cs @@ -1,6 +1,7 @@ using System.Data.Common; using Account.Database; using Microsoft.EntityFrameworkCore; +using Npgsql; using SharedKernel.Authentication.TokenGeneration; using SharedKernel.Domain; using SharedKernel.Persistence; @@ -38,12 +39,12 @@ public interface ISessionRepository : ICrudRepository Task TryRevokeForReplayUnfilteredAsync(SessionId sessionId, DateTimeOffset now, CancellationToken cancellationToken); } -public sealed class SessionRepository(AccountDbContext accountDbContext) +public sealed class SessionRepository(AccountDbContext accountDbContext, IServiceProvider serviceProvider) : RepositoryBase(accountDbContext), ISessionRepository { public async Task 
GetByIdUnfilteredAsync(SessionId sessionId, CancellationToken cancellationToken) { - return await DbSet.IgnoreQueryFilters().FirstOrDefaultAsync(s => s.Id == sessionId, cancellationToken); + return await DbSet.IgnoreQueryFilters().SingleOrDefaultAsync(s => s.Id == sessionId, cancellationToken); } /// @@ -53,27 +54,28 @@ public sealed class SessionRepository(AccountDbContext accountDbContext) /// public async Task TryRefreshAsync(SessionId sessionId, RefreshTokenJti currentJti, int currentVersion, RefreshTokenJti newJti, DateTimeOffset now, CancellationToken cancellationToken) { - var existingConnection = accountDbContext.Database.GetDbConnection(); - - // Create a new connection of the same type to ensure complete isolation from EF Core's transaction. - await using var connection = (DbConnection)Activator.CreateInstance(existingConnection.GetType())!; - connection.ConnectionString = accountDbContext.Database.GetConnectionString(); - await connection.OpenAsync(cancellationToken); + // Create a new connection to ensure complete isolation from EF Core's transaction. + // Use NpgsqlDataSource from DI to preserve the Entra ID token provider configured for Azure. + // For SQLite (tests), fall back to creating a raw connection from the connection string. + await using var connection = serviceProvider.GetService(typeof(NpgsqlDataSource)) is NpgsqlDataSource npgsqlDataSource + ? 
await npgsqlDataSource.OpenConnectionAsync(cancellationToken) + : await OpenFallbackConnectionAsync(cancellationToken); await using var command = connection.CreateCommand(); command.CommandText = """ - UPDATE Sessions - SET PreviousRefreshTokenJti = RefreshTokenJti, - RefreshTokenJti = @newJti, - RefreshTokenVersion = RefreshTokenVersion + 1, - ModifiedAt = @now - WHERE Id = @sessionId - AND RefreshTokenJti = @currentJti - AND RefreshTokenVersion = @currentVersion + UPDATE sessions + SET previous_refresh_token_jti = refresh_token_jti, + refresh_token_jti = @newJti, + refresh_token_version = refresh_token_version + 1, + modified_at = @now + WHERE id = @sessionId + AND refresh_token_jti = @currentJti + AND refresh_token_version = @currentVersion """; + var isSqlite = accountDbContext.Database.ProviderName is "Microsoft.EntityFrameworkCore.Sqlite"; AddParameter(command, "@newJti", newJti.Value); - AddParameter(command, "@now", now.ToString("O")); + AddParameter(command, "@now", isSqlite ? now.ToString("O") : now); AddParameter(command, "@sessionId", sessionId.Value); AddParameter(command, "@currentJti", currentJti.Value); AddParameter(command, "@currentVersion", currentVersion); @@ -115,6 +117,15 @@ public async Task GetActiveSessionsForUsersUnfilteredAsync(UserId[] u return sessions.OrderByDescending(s => s.ModifiedAt ?? 
s.CreatedAt).ToArray(); } + private async Task OpenFallbackConnectionAsync(CancellationToken cancellationToken) + { + var existingConnection = accountDbContext.Database.GetDbConnection(); + var connection = (DbConnection)Activator.CreateInstance(existingConnection.GetType())!; + connection.ConnectionString = accountDbContext.Database.GetConnectionString(); + await connection.OpenAsync(cancellationToken); + return connection; + } + private static void AddParameter(DbCommand command, string name, object value) { var parameter = command.CreateParameter(); diff --git a/application/account/Core/Features/Billing/Queries/GetPaymentHistory.cs b/application/account/Core/Features/Billing/Queries/GetPaymentHistory.cs index 12512e70a9..a53ee76d41 100644 --- a/application/account/Core/Features/Billing/Queries/GetPaymentHistory.cs +++ b/application/account/Core/Features/Billing/Queries/GetPaymentHistory.cs @@ -1,6 +1,8 @@ using Account.Features.Subscriptions.Domain; +using Account.Features.Users.Domain; using JetBrains.Annotations; using SharedKernel.Cqrs; +using SharedKernel.ExecutionContext; namespace Account.Features.Billing.Queries; @@ -21,11 +23,16 @@ public sealed record PaymentTransactionResponse( string? 
CreditNoteUrl ); -public sealed class GetPaymentHistoryHandler(ISubscriptionRepository subscriptionRepository) +public sealed class GetPaymentHistoryHandler(ISubscriptionRepository subscriptionRepository, IExecutionContext executionContext) : IRequestHandler> { public async Task> Handle(GetPaymentHistoryQuery query, CancellationToken cancellationToken) { + if (executionContext.UserInfo.Role != nameof(UserRole.Owner)) + { + return Result.Forbidden("Only owners can view payment history."); + } + var subscription = await subscriptionRepository.GetCurrentAsync(cancellationToken); var allTransactions = subscription.PaymentTransactions diff --git a/application/account/Core/Features/EmailAuthentication/Domain/EmailLoginConfiguration.cs b/application/account/Core/Features/EmailAuthentication/Domain/EmailLoginConfiguration.cs index 336c9c6567..a681409f46 100644 --- a/application/account/Core/Features/EmailAuthentication/Domain/EmailLoginConfiguration.cs +++ b/application/account/Core/Features/EmailAuthentication/Domain/EmailLoginConfiguration.cs @@ -8,7 +8,6 @@ public sealed class EmailLoginConfiguration : IEntityTypeConfiguration builder) { - builder.ToTable("EmailLogins"); builder.MapStronglyTypedUuid(el => el.Id); } } diff --git a/application/account/Core/Features/ExternalAuthentication/Domain/ExternalLoginConfiguration.cs b/application/account/Core/Features/ExternalAuthentication/Domain/ExternalLoginConfiguration.cs index 5c63149adb..37b9716cbe 100644 --- a/application/account/Core/Features/ExternalAuthentication/Domain/ExternalLoginConfiguration.cs +++ b/application/account/Core/Features/ExternalAuthentication/Domain/ExternalLoginConfiguration.cs @@ -8,7 +8,6 @@ public sealed class ExternalLoginConfiguration : IEntityTypeConfiguration builder) { - builder.ToTable("ExternalLogins"); builder.MapStronglyTypedId(el => el.Id); } } diff --git a/application/account/Core/Features/Subscriptions/Domain/StripeEventConfiguration.cs 
b/application/account/Core/Features/Subscriptions/Domain/StripeEventConfiguration.cs index 294e3eeabc..1b2ad1e4c3 100644 --- a/application/account/Core/Features/Subscriptions/Domain/StripeEventConfiguration.cs +++ b/application/account/Core/Features/Subscriptions/Domain/StripeEventConfiguration.cs @@ -13,5 +13,6 @@ public void Configure(EntityTypeBuilder builder) builder.MapStronglyTypedNullableId(e => e.StripeCustomerId); builder.MapStronglyTypedNullableId(e => e.StripeSubscriptionId); builder.MapStronglyTypedNullableLongId(e => e.TenantId); + builder.Property(e => e.Payload).HasColumnType("jsonb"); } } diff --git a/application/account/Core/Features/Subscriptions/Domain/SubscriptionConfiguration.cs b/application/account/Core/Features/Subscriptions/Domain/SubscriptionConfiguration.cs index 845cc0014e..9c80e0254d 100644 --- a/application/account/Core/Features/Subscriptions/Domain/SubscriptionConfiguration.cs +++ b/application/account/Core/Features/Subscriptions/Domain/SubscriptionConfiguration.cs @@ -1,5 +1,6 @@ using System.Collections.Immutable; using System.Text.Json; +using Account.Features.Tenants.Domain; using Microsoft.EntityFrameworkCore; using Microsoft.EntityFrameworkCore.ChangeTracking; using Microsoft.EntityFrameworkCore.Metadata.Builders; @@ -16,13 +17,14 @@ public void Configure(EntityTypeBuilder builder) { builder.MapStronglyTypedUuid(s => s.Id); builder.MapStronglyTypedLongId(s => s.TenantId); + builder.HasOne().WithMany().HasForeignKey(s => s.TenantId); builder.MapStronglyTypedNullableId(s => s.StripeCustomerId); builder.MapStronglyTypedNullableId(s => s.StripeSubscriptionId); builder.Property(s => s.CurrentPriceAmount).HasPrecision(18, 2); builder.Property(s => s.PaymentTransactions) - .HasColumnName("PaymentTransactions") + .HasColumnType("jsonb") .HasConversion( v => JsonSerializer.Serialize(v.ToArray(), JsonSerializerOptions), v => JsonSerializer.Deserialize>(v, JsonSerializerOptions) @@ -37,7 +39,7 @@ public void Configure(EntityTypeBuilder 
builder) builder.OwnsOne(s => s.PaymentMethod, b => b.ToJson()); builder.Property(s => s.BillingInfo) - .HasColumnName("BillingInfo") + .HasColumnType("jsonb") .HasConversion( v => v == null ? null : JsonSerializer.Serialize(v, JsonSerializerOptions), v => v == null ? null : JsonSerializer.Deserialize(v, JsonSerializerOptions) diff --git a/application/account/Core/Features/Subscriptions/Domain/SubscriptionRepository.cs b/application/account/Core/Features/Subscriptions/Domain/SubscriptionRepository.cs index 9f3a583640..27ab0e3a00 100644 --- a/application/account/Core/Features/Subscriptions/Domain/SubscriptionRepository.cs +++ b/application/account/Core/Features/Subscriptions/Domain/SubscriptionRepository.cs @@ -11,7 +11,7 @@ public interface ISubscriptionRepository : ICrudRepository GetCurrentAsync(CancellationToken cancellationToken); /// - /// Retrieves a subscription by Stripe customer ID with pessimistic locking (UPDLOCK). + /// Retrieves a subscription by Stripe customer ID with pessimistic locking (FOR UPDATE). /// This method should only be used in webhook processing to serialize with user-action commands. /// This method bypasses tenant query filters since webhooks have no tenant context. /// @@ -34,19 +34,19 @@ public async Task GetCurrentAsync(CancellationToken cancellationTo } /// - /// Retrieves a subscription by Stripe customer ID with pessimistic locking (UPDLOCK). + /// Retrieves a subscription by Stripe customer ID with pessimistic locking (FOR UPDATE). /// This method should only be used in webhook processing to serialize with user-action commands. /// This method bypasses tenant query filters since webhooks have no tenant context. 
/// public async Task GetByStripeCustomerIdWithLockUnfilteredAsync(StripeCustomerId stripeCustomerId, CancellationToken cancellationToken) { - if (accountDbContext.Database.ProviderName == "Microsoft.EntityFrameworkCore.Sqlite") + if (accountDbContext.Database.ProviderName is "Microsoft.EntityFrameworkCore.Sqlite") { return await DbSet.IgnoreQueryFilters().SingleOrDefaultAsync(s => s.StripeCustomerId == stripeCustomerId, cancellationToken); } return await DbSet - .FromSqlInterpolated($"SELECT * FROM Subscriptions WITH (UPDLOCK, ROWLOCK) WHERE StripeCustomerId = {stripeCustomerId.Value}") + .FromSqlInterpolated($"SELECT * FROM subscriptions WHERE stripe_customer_id = {stripeCustomerId.Value} FOR UPDATE") .IgnoreQueryFilters() .SingleOrDefaultAsync(cancellationToken); } diff --git a/application/account/Core/Features/Subscriptions/Shared/ProcessPendingStripeEvents.cs b/application/account/Core/Features/Subscriptions/Shared/ProcessPendingStripeEvents.cs index 503322ee9f..0d9858f8e1 100644 --- a/application/account/Core/Features/Subscriptions/Shared/ProcessPendingStripeEvents.cs +++ b/application/account/Core/Features/Subscriptions/Shared/ProcessPendingStripeEvents.cs @@ -77,6 +77,7 @@ private async Task SyncStateFromStripe(Tenant tenant, Subscription subscription, if (customerResult.IsCustomerDeleted) { subscription.ResetToFreePlan(); + tenant.UpdatePlan(SubscriptionPlan.Basis); tenant.Suspend(SuspensionReason.CustomerDeleted, timeProvider.GetUtcNow()); tenantRepository.Update(tenant); subscriptionRepository.Update(subscription); @@ -112,6 +113,7 @@ private async Task SyncStateFromStripe(Tenant tenant, Subscription subscription, if (stripeState is not null) { subscription.SetStripeSubscription(stripeState.StripeSubscriptionId, stripeState.Plan, stripeState.CurrentPriceAmount, stripeState.CurrentPriceCurrency, stripeState.CurrentPeriodEnd, stripeState.PaymentMethod); + tenant.UpdatePlan(stripeState.Plan); } // Always sync payment transactions from Stripe (via 
subscription when active, via invoices when cancelled) @@ -201,18 +203,21 @@ private async Task SyncStateFromStripe(Tenant tenant, Subscription subscription, if (subscriptionExpired) { subscription.ResetToFreePlan(); + tenant.UpdatePlan(SubscriptionPlan.Basis); events.CollectEvent(new SubscriptionExpired(subscription.Id, previousPlan, daysOnCurrentPlan, previousPriceAmount!.Value, -previousPriceAmount.Value, previousPriceCurrency!)); } if (subscriptionImmediatelyCancelled) { subscription.ResetToFreePlan(); + tenant.UpdatePlan(SubscriptionPlan.Basis); events.CollectEvent(new SubscriptionCancelled(subscription.Id, previousPlan, CancellationReason.CancelledByAdmin, 0, daysOnCurrentPlan, previousPriceAmount!.Value, -previousPriceAmount.Value, previousPriceCurrency!)); } if (subscriptionSuspended) { subscription.ResetToFreePlan(); + tenant.UpdatePlan(SubscriptionPlan.Basis); tenant.Suspend(SuspensionReason.PaymentFailed, timeProvider.GetUtcNow()); events.CollectEvent(new SubscriptionSuspended(subscription.Id, previousPlan, SuspensionReason.PaymentFailed, previousPriceAmount!.Value, -previousPriceAmount.Value, previousPriceCurrency!)); } @@ -240,7 +245,8 @@ private async Task SyncStateFromStripe(Tenant tenant, Subscription subscription, } // Persist all aggregate mutations and mark pending events as processed - if (subscriptionCreated || subscriptionSuspended) + var tenantChanged = stripeState is not null || subscriptionCreated || subscriptionExpired || subscriptionImmediatelyCancelled || subscriptionSuspended; + if (tenantChanged) { tenantRepository.Update(tenant); } diff --git a/application/account/Core/Features/Tenants/Domain/Tenant.cs b/application/account/Core/Features/Tenants/Domain/Tenant.cs index 15b27b5a6f..09aa466434 100644 --- a/application/account/Core/Features/Tenants/Domain/Tenant.cs +++ b/application/account/Core/Features/Tenants/Domain/Tenant.cs @@ -1,3 +1,4 @@ +using Account.Features.Subscriptions.Domain; using SharedKernel.Domain; namespace 
Account.Features.Tenants.Domain; @@ -7,6 +8,7 @@ public sealed class Tenant : SoftDeletableAggregateRoot private Tenant() : base(TenantId.NewId()) { State = TenantState.Active; + Plan = SubscriptionPlan.Basis; Logo = new Logo(); } @@ -14,6 +16,8 @@ private Tenant() : base(TenantId.NewId()) public TenantState State { get; private set; } + public SubscriptionPlan Plan { get; private set; } + public SuspensionReason? SuspensionReason { get; private set; } public DateTimeOffset? SuspendedAt { get; private set; } @@ -55,6 +59,11 @@ public void RemoveLogo() { Logo = new Logo(Version: Logo.Version); } + + public void UpdatePlan(SubscriptionPlan plan) + { + Plan = plan; + } } public sealed record Logo(string? Url = null, int Version = 0); diff --git a/application/account/Core/Features/Tenants/Domain/TenantRepository.cs b/application/account/Core/Features/Tenants/Domain/TenantRepository.cs index de2cc81ef7..402cd52e51 100644 --- a/application/account/Core/Features/Tenants/Domain/TenantRepository.cs +++ b/application/account/Core/Features/Tenants/Domain/TenantRepository.cs @@ -41,6 +41,6 @@ public async Task GetByIdsAsync(TenantId[] ids, CancellationToken canc /// public async Task GetByIdUnfilteredAsync(TenantId id, CancellationToken cancellationToken) { - return await DbSet.IgnoreQueryFilters().FirstOrDefaultAsync(t => t.Id == id, cancellationToken); + return await DbSet.IgnoreQueryFilters().SingleOrDefaultAsync(t => t.Id == id, cancellationToken); } } diff --git a/application/account/Core/Features/Users/Domain/UserConfiguration.cs b/application/account/Core/Features/Users/Domain/UserConfiguration.cs index c0eef7951c..c9078dcbd6 100644 --- a/application/account/Core/Features/Users/Domain/UserConfiguration.cs +++ b/application/account/Core/Features/Users/Domain/UserConfiguration.cs @@ -25,7 +25,7 @@ public void Configure(EntityTypeBuilder builder) .HasPrincipalKey(t => t.Id); builder.Property(u => u.ExternalIdentities) - .HasColumnName("ExternalIdentities") + 
.HasColumnType("jsonb") .HasConversion( v => JsonSerializer.Serialize(v.ToArray(), JsonSerializerOptions), v => JsonSerializer.Deserialize>(v, JsonSerializerOptions) diff --git a/application/account/Core/Features/Users/Domain/UserRepository.cs b/application/account/Core/Features/Users/Domain/UserRepository.cs index d29c962aa0..0f8b1fd138 100644 --- a/application/account/Core/Features/Users/Domain/UserRepository.cs +++ b/application/account/Core/Features/Users/Domain/UserRepository.cs @@ -116,23 +116,38 @@ public async Task GetDeletedByIdsAsync(UserId[] ids, CancellationToken c public async Task<(int TotalUsers, int ActiveUsers, int PendingUsers)> GetUserSummaryAsync(CancellationToken cancellationToken) { - var thirtyDaysAgo = timeProvider.GetUtcNow().AddDays(-30).ToString("O"); - var tenantId = executionContext.TenantId!.Value.ToString(); - - var sql = """ - SELECT - COUNT(*) AS TotalUsers, - SUM(CASE WHEN EmailConfirmed = 1 AND LastSeenAt >= {0} THEN 1 ELSE 0 END) AS ActiveUsers, - SUM(CASE WHEN EmailConfirmed = 0 THEN 1 ELSE 0 END) AS PendingUsers - FROM Users - WHERE TenantId = {1} AND DeletedAt IS NULL - """; - - var result = await accountDbContext.Database - .SqlQueryRaw(sql, thirtyDaysAgo, tenantId) - .SingleAsync(cancellationToken); - - return (result.TotalUsers, result.ActiveUsers, result.PendingUsers); + var thirtyDaysAgo = timeProvider.GetUtcNow().AddDays(-30); + + if (accountDbContext.Database.ProviderName is "Microsoft.EntityFrameworkCore.Sqlite") + { + var sql = """ + SELECT + COUNT(*) AS total_users, + SUM(CASE WHEN email_confirmed = 1 AND last_seen_at >= {0} THEN 1 ELSE 0 END) AS active_users, + SUM(CASE WHEN email_confirmed = 0 THEN 1 ELSE 0 END) AS pending_users + FROM users + WHERE tenant_id = {1} AND deleted_at IS NULL + """; + + var result = await accountDbContext.Database + .SqlQueryRaw(sql, thirtyDaysAgo.ToString("O"), executionContext.TenantId!.Value.ToString()) + .SingleAsync(cancellationToken); + + return (result.TotalUsers, 
result.ActiveUsers, result.PendingUsers); + } + + var totalUsers = await DbSet.CountAsync(cancellationToken); + + var activeUsers = await DbSet + .Where(u => u.EmailConfirmed) + .Where(u => u.LastSeenAt >= thirtyDaysAgo) + .CountAsync(cancellationToken); + + var pendingUsers = await DbSet + .Where(u => !u.EmailConfirmed) + .CountAsync(cancellationToken); + + return (totalUsers, activeUsers, pendingUsers); } public async Task<(User[] Users, int TotalItems, int TotalPages)> Search( @@ -241,6 +256,7 @@ public async Task GetUsersByEmailUnfilteredAsync(string email, Cancellat return await DbSet .IgnoreQueryFilters([QueryFilterNames.Tenant]) .Where(u => u.Email == email.ToLowerInvariant()) + .OrderBy(u => u.Id) .ToArrayAsync(cancellationToken); } diff --git a/application/account/Tests/Authentication/GetUserSessionsTests.cs b/application/account/Tests/Authentication/GetUserSessionsTests.cs index bae3483c70..5deba35890 100644 --- a/application/account/Tests/Authentication/GetUserSessionsTests.cs +++ b/application/account/Tests/Authentication/GetUserSessionsTests.cs @@ -2,6 +2,7 @@ using Account.Database; using Account.Features.Authentication.Domain; using Account.Features.Authentication.Queries; +using Account.Features.Subscriptions.Domain; using FluentAssertions; using SharedKernel.Authentication.TokenGeneration; using SharedKernel.Domain; @@ -128,13 +129,14 @@ private long InsertTenant(string name) var tenantId = TenantId.NewId().Value; var now = TimeProvider.System.GetUtcNow(); - Connection.Insert("Tenants", [ - ("Id", tenantId), - ("CreatedAt", now), - ("ModifiedAt", null), - ("Name", name), - ("State", "Active"), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenantId), + ("created_at", now), + ("modified_at", null), + ("name", name), + ("state", "Active"), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); @@ -145,20 +147,20 @@ private void InsertUser(long tenantId, UserId userId, string 
email) { var now = TimeProvider.System.GetUtcNow(); - Connection.Insert("Users", [ - ("TenantId", tenantId), - ("Id", userId.ToString()), - ("CreatedAt", now), - ("ModifiedAt", null), - ("Email", email), - ("EmailConfirmed", true), - ("FirstName", "Test"), - ("LastName", "User"), - ("Title", null), - ("Avatar", """{"Url":null,"Version":0,"IsGravatar":false}"""), - ("Role", "Owner"), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenantId), + ("id", userId.ToString()), + ("created_at", now), + ("modified_at", null), + ("email", email), + ("email_confirmed", true), + ("first_name", "Test"), + ("last_name", "User"), + ("title", null), + ("avatar", """{"Url":null,"Version":0,"IsGravatar":false}"""), + ("role", "Owner"), + ("locale", "en-US"), + ("external_identities", "[]") ] ); } @@ -169,21 +171,21 @@ private string InsertSession(long tenantId, string userId, bool isRevoked = fals var jti = RefreshTokenJti.NewId().ToString(); var now = TimeProvider.System.GetUtcNow(); - Connection.Insert("Sessions", [ - ("TenantId", tenantId), - ("Id", sessionId), - ("UserId", userId), - ("CreatedAt", now), - ("ModifiedAt", null), - ("RefreshTokenJti", jti), - ("PreviousRefreshTokenJti", null), - ("RefreshTokenVersion", 1), - ("LoginMethod", nameof(LoginMethod.OneTimePassword)), - ("DeviceType", nameof(DeviceType.Desktop)), - ("UserAgent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), - ("IpAddress", "127.0.0.1"), - ("RevokedAt", isRevoked ? 
now : null), - ("RevokedReason", null) + Connection.Insert("sessions", [ + ("tenant_id", tenantId), + ("id", sessionId), + ("user_id", userId), + ("created_at", now), + ("modified_at", null), + ("refresh_token_jti", jti), + ("previous_refresh_token_jti", null), + ("refresh_token_version", 1), + ("login_method", nameof(LoginMethod.OneTimePassword)), + ("device_type", nameof(DeviceType.Desktop)), + ("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), + ("ip_address", "127.0.0.1"), + ("revoked_at", isRevoked ? now : null), + ("revoked_reason", null) ] ); diff --git a/application/account/Tests/Authentication/LogoutTests.cs b/application/account/Tests/Authentication/LogoutTests.cs index 9f23fcd9a2..6675fa41d8 100644 --- a/application/account/Tests/Authentication/LogoutTests.cs +++ b/application/account/Tests/Authentication/LogoutTests.cs @@ -16,9 +16,9 @@ public async Task Logout_WhenAuthenticatedAsOwner_ShouldRevokeSessionAndCollectL { // Arrange var sessionId = DatabaseSeeder.Tenant1OwnerSession.Id.ToString(); - Connection.RowExists("Sessions", sessionId).Should().BeTrue(); + Connection.RowExists("sessions", sessionId).Should().BeTrue(); object[] parameters = [new { id = sessionId }]; - Connection.ExecuteScalar("SELECT RevokedAt FROM Sessions WHERE Id = @id", parameters).Should().BeNull(); + Connection.ExecuteScalar("SELECT revoked_at FROM sessions WHERE id = @id", parameters).Should().BeNull(); var command = new LogoutCommand(); // Act @@ -26,8 +26,8 @@ public async Task Logout_WhenAuthenticatedAsOwner_ShouldRevokeSessionAndCollectL // Assert await response.ShouldBeSuccessfulPostRequest(hasLocation: false); - Connection.ExecuteScalar("SELECT RevokedAt FROM Sessions WHERE Id = @id", parameters).Should().NotBeNull(); - Connection.ExecuteScalar("SELECT RevokedReason FROM Sessions WHERE Id = @id", parameters).Should().Be("LoggedOut"); + Connection.ExecuteScalar("SELECT revoked_at FROM sessions WHERE id = @id", parameters).Should().NotBeNull(); + 
Connection.ExecuteScalar("SELECT revoked_reason FROM sessions WHERE id = @id", parameters).Should().Be("LoggedOut"); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("SessionRevoked"); TelemetryEventsCollectorSpy.CollectedEvents[0].Properties["event.reason"].Should().Be("LoggedOut"); @@ -40,9 +40,9 @@ public async Task Logout_WhenAuthenticatedAsMember_ShouldRevokeSessionAndCollect { // Arrange var sessionId = DatabaseSeeder.Tenant1MemberSession.Id.ToString(); - Connection.RowExists("Sessions", sessionId).Should().BeTrue(); + Connection.RowExists("sessions", sessionId).Should().BeTrue(); object[] parameters = [new { id = sessionId }]; - Connection.ExecuteScalar("SELECT RevokedAt FROM Sessions WHERE Id = @id", parameters).Should().BeNull(); + Connection.ExecuteScalar("SELECT revoked_at FROM sessions WHERE id = @id", parameters).Should().BeNull(); var command = new LogoutCommand(); // Act @@ -50,8 +50,8 @@ public async Task Logout_WhenAuthenticatedAsMember_ShouldRevokeSessionAndCollect // Assert await response.ShouldBeSuccessfulPostRequest(hasLocation: false); - Connection.ExecuteScalar("SELECT RevokedAt FROM Sessions WHERE Id = @id", parameters).Should().NotBeNull(); - Connection.ExecuteScalar("SELECT RevokedReason FROM Sessions WHERE Id = @id", parameters).Should().Be("LoggedOut"); + Connection.ExecuteScalar("SELECT revoked_at FROM sessions WHERE id = @id", parameters).Should().NotBeNull(); + Connection.ExecuteScalar("SELECT revoked_reason FROM sessions WHERE id = @id", parameters).Should().Be("LoggedOut"); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("SessionRevoked"); TelemetryEventsCollectorSpy.CollectedEvents[0].Properties["event.reason"].Should().Be("LoggedOut"); diff --git a/application/account/Tests/Authentication/RefreshAuthenticationTokensTests.cs 
b/application/account/Tests/Authentication/RefreshAuthenticationTokensTests.cs index 53140726b2..5d8d5796a5 100644 --- a/application/account/Tests/Authentication/RefreshAuthenticationTokensTests.cs +++ b/application/account/Tests/Authentication/RefreshAuthenticationTokensTests.cs @@ -39,7 +39,7 @@ public async Task RefreshAuthenticationTokens_WhenValidToken_ShouldRefreshAndInc // Assert response.StatusCode.Should().Be(HttpStatusCode.OK); - var updatedVersion = Connection.ExecuteScalar("SELECT RefreshTokenVersion FROM Sessions WHERE Id = @id", [new { id = sessionId.ToString() }]); + var updatedVersion = Connection.ExecuteScalar("SELECT refresh_token_version FROM sessions WHERE id = @id", [new { id = sessionId.ToString() }]); updatedVersion.Should().Be(2); } @@ -62,7 +62,7 @@ public async Task RefreshAuthenticationTokens_WhenPreviousVersionWithinGracePeri // Assert response.StatusCode.Should().Be(HttpStatusCode.OK); - var sessionVersion = Connection.ExecuteScalar("SELECT RefreshTokenVersion FROM Sessions WHERE Id = @id", [new { id = sessionId.ToString() }]); + var sessionVersion = Connection.ExecuteScalar("SELECT refresh_token_version FROM sessions WHERE id = @id", [new { id = sessionId.ToString() }]); sessionVersion.Should().Be(2); } @@ -88,8 +88,8 @@ public async Task RefreshAuthenticationTokens_WhenReplayAttackDetected_ShouldRev response.Headers.GetValues("x-unauthorized-reason").Single().Should().Be("ReplayAttackDetected"); object[] parameters = [new { id = sessionId.ToString() }]; - Connection.ExecuteScalar("SELECT RevokedAt FROM Sessions WHERE Id = @id", parameters).Should().NotBeNull(); - Connection.ExecuteScalar("SELECT RevokedReason FROM Sessions WHERE Id = @id", parameters).Should().Be("ReplayAttackDetected"); + Connection.ExecuteScalar("SELECT revoked_at FROM sessions WHERE id = @id", parameters).Should().NotBeNull(); + Connection.ExecuteScalar("SELECT revoked_reason FROM sessions WHERE id = @id", parameters).Should().Be("ReplayAttackDetected"); 
TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(1); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("SessionReplayDetected"); @@ -158,7 +158,7 @@ public async Task RefreshAuthenticationTokens_WhenSequentialRequestsWithSameToke response1.StatusCode.Should().Be(HttpStatusCode.OK); response2.StatusCode.Should().Be(HttpStatusCode.OK); - var sessionVersion = Connection.ExecuteScalar("SELECT RefreshTokenVersion FROM Sessions WHERE Id = @id", [new { id = sessionId.ToString() }]); + var sessionVersion = Connection.ExecuteScalar("SELECT refresh_token_version FROM sessions WHERE id = @id", [new { id = sessionId.ToString() }]); sessionVersion.Should().Be(2); } @@ -181,21 +181,21 @@ private void InsertSession(long tenantId, string userId, SessionId sessionId, Re { var now = TimeProvider.System.GetUtcNow(); - Connection.Insert("Sessions", [ - ("TenantId", tenantId), - ("Id", sessionId.ToString()), - ("UserId", userId), - ("CreatedAt", now), - ("ModifiedAt", null), - ("RefreshTokenJti", jti.ToString()), - ("PreviousRefreshTokenJti", null), - ("RefreshTokenVersion", version), - ("LoginMethod", nameof(LoginMethod.OneTimePassword)), - ("DeviceType", nameof(DeviceType.Desktop)), - ("UserAgent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), - ("IpAddress", "127.0.0.1"), - ("RevokedAt", isRevoked ? now : null), - ("RevokedReason", null) + Connection.Insert("sessions", [ + ("tenant_id", tenantId), + ("id", sessionId.ToString()), + ("user_id", userId), + ("created_at", now), + ("modified_at", null), + ("refresh_token_jti", jti.ToString()), + ("previous_refresh_token_jti", null), + ("refresh_token_version", version), + ("login_method", nameof(LoginMethod.OneTimePassword)), + ("device_type", nameof(DeviceType.Desktop)), + ("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), + ("ip_address", "127.0.0.1"), + ("revoked_at", isRevoked ? 
now : null), + ("revoked_reason", null) ] ); } @@ -204,21 +204,21 @@ private void InsertSessionWithGracePeriod(long tenantId, string userId, SessionI { var now = TimeProvider.System.GetUtcNow(); - Connection.Insert("Sessions", [ - ("TenantId", tenantId), - ("Id", sessionId.ToString()), - ("UserId", userId), - ("CreatedAt", now), - ("ModifiedAt", modifiedAt), - ("RefreshTokenJti", currentJti.ToString()), - ("PreviousRefreshTokenJti", previousJti?.ToString()), - ("RefreshTokenVersion", currentVersion), - ("LoginMethod", nameof(LoginMethod.OneTimePassword)), - ("DeviceType", nameof(DeviceType.Desktop)), - ("UserAgent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), - ("IpAddress", "127.0.0.1"), - ("RevokedAt", null), - ("RevokedReason", null) + Connection.Insert("sessions", [ + ("tenant_id", tenantId), + ("id", sessionId.ToString()), + ("user_id", userId), + ("created_at", now), + ("modified_at", modifiedAt), + ("refresh_token_jti", currentJti.ToString()), + ("previous_refresh_token_jti", previousJti?.ToString()), + ("refresh_token_version", currentVersion), + ("login_method", nameof(LoginMethod.OneTimePassword)), + ("device_type", nameof(DeviceType.Desktop)), + ("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), + ("ip_address", "127.0.0.1"), + ("revoked_at", null), + ("revoked_reason", null) ] ); } diff --git a/application/account/Tests/Authentication/RevokeSessionTests.cs b/application/account/Tests/Authentication/RevokeSessionTests.cs index 346351a0e3..1a4ad8c021 100644 --- a/application/account/Tests/Authentication/RevokeSessionTests.cs +++ b/application/account/Tests/Authentication/RevokeSessionTests.cs @@ -23,8 +23,8 @@ public async Task RevokeSession_WhenValid_ShouldRevokeSession() // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); object[] parameters = [new { id = sessionId }]; - Connection.ExecuteScalar("SELECT RevokedAt FROM Sessions WHERE Id = @id", parameters).Should().NotBeNull(); - Connection.ExecuteScalar("SELECT RevokedReason FROM 
Sessions WHERE Id = @id", parameters).Should().Be("Revoked"); + Connection.ExecuteScalar("SELECT revoked_at FROM sessions WHERE id = @id", parameters).Should().NotBeNull(); + Connection.ExecuteScalar("SELECT revoked_reason FROM sessions WHERE id = @id", parameters).Should().Be("Revoked"); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(1); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("SessionRevoked"); @@ -96,21 +96,21 @@ private string InsertSession(long tenantId, string userId, bool isRevoked = fals var jti = RefreshTokenJti.NewId().ToString(); var now = TimeProvider.System.GetUtcNow(); - Connection.Insert("Sessions", [ - ("TenantId", tenantId), - ("Id", sessionId), - ("UserId", userId), - ("CreatedAt", now), - ("ModifiedAt", null), - ("RefreshTokenJti", jti), - ("PreviousRefreshTokenJti", null), - ("RefreshTokenVersion", 1), - ("LoginMethod", nameof(LoginMethod.OneTimePassword)), - ("DeviceType", nameof(DeviceType.Desktop)), - ("UserAgent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), - ("IpAddress", "127.0.0.1"), - ("RevokedAt", isRevoked ? now : null), - ("RevokedReason", null) + Connection.Insert("sessions", [ + ("tenant_id", tenantId), + ("id", sessionId), + ("user_id", userId), + ("created_at", now), + ("modified_at", null), + ("refresh_token_jti", jti), + ("previous_refresh_token_jti", null), + ("refresh_token_version", 1), + ("login_method", nameof(LoginMethod.OneTimePassword)), + ("device_type", nameof(DeviceType.Desktop)), + ("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"), + ("ip_address", "127.0.0.1"), + ("revoked_at", isRevoked ? 
now : null), + ("revoked_reason", null) ] ); diff --git a/application/account/Tests/Authentication/SwitchTenantTests.cs b/application/account/Tests/Authentication/SwitchTenantTests.cs index eca3f0a5b8..05a60d9968 100644 --- a/application/account/Tests/Authentication/SwitchTenantTests.cs +++ b/application/account/Tests/Authentication/SwitchTenantTests.cs @@ -24,32 +24,33 @@ public async Task SwitchTenant_WhenUserExistsInTargetTenant_ShouldSwitchSuccessf var tenant2Name = Faker.Company.CompanyName(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", tenant2Name), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", tenant2Name), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); InsertSubscription(tenant2Id); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", 
nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -66,7 +67,7 @@ public async Task SwitchTenant_WhenUserExistsInTargetTenant_ShouldSwitchSuccessf response.Headers.Count(h => h.Key == "x-access-token").Should().Be(1); var oldSessionRevokedReason = Connection.ExecuteScalar( - "SELECT RevokedReason FROM Sessions WHERE Id = @Id", + "SELECT revoked_reason FROM sessions WHERE id = @Id", [new { Id = DatabaseSeeder.Tenant1MemberSession.Id.ToString() }] ); oldSessionRevokedReason.Should().Be("SwitchTenant"); @@ -104,30 +105,31 @@ public async Task SwitchTenant_WhenUserDoesNotExistInTargetTenant_ShouldReturnFo // Arrange var tenant2Id = TenantId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Owner)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("email_confirmed", true), + ("first_name", 
Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Owner)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -164,32 +166,33 @@ public async Task SwitchTenant_WhenUserEmailNotConfirmed_ShouldConfirmEmail() var tenant2Name = Faker.Company.CompanyName(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", tenant2Name), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", tenant2Name), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); InsertSubscription(tenant2Id); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", false), // User's email is not confirmed - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", false), // User's email is not confirmed + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", 
"[]") ] ); @@ -205,7 +208,7 @@ public async Task SwitchTenant_WhenUserEmailNotConfirmed_ShouldConfirmEmail() // Verify that the user's email is now confirmed var emailConfirmed = Connection.ExecuteScalar( - "SELECT EmailConfirmed FROM Users WHERE Id = @Id", + "SELECT email_confirmed FROM users WHERE id = @Id", [new { Id = user2Id.ToString() }] ); emailConfirmed.Should().Be(1); // SQLite stores boolean as 0/1 @@ -226,41 +229,42 @@ public async Task SwitchTenant_WhenAcceptingInvite_ShouldCopyProfileData() var currentLocale = "da-DK"; // Update current user with profile data - Connection.Update("Users", "Id", DatabaseSeeder.Tenant1Member.Id.ToString(), [ - ("FirstName", currentFirstName), - ("LastName", currentLastName), - ("Title", currentTitle), - ("Locale", currentLocale) + Connection.Update("users", "id", DatabaseSeeder.Tenant1Member.Id.ToString(), [ + ("first_name", currentFirstName), + ("last_name", currentLastName), + ("title", currentTitle), + ("locale", currentLocale) ] ); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", tenant2Name), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", tenant2Name), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); InsertSubscription(tenant2Id); // New user has no profile data and unconfirmed email - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", false), // Unconfirmed - invitation pending - ("FirstName", null), - ("LastName", null), - ("Title", "Manager"), // Has a title that will be overwritten - ("Avatar", 
JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", false), // Unconfirmed - invitation pending + ("first_name", null), + ("last_name", null), + ("title", "Manager"), // Has a title that will be overwritten + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -276,23 +280,23 @@ public async Task SwitchTenant_WhenAcceptingInvite_ShouldCopyProfileData() // Verify profile data was copied var firstName = Connection.ExecuteScalar( - "SELECT FirstName FROM Users WHERE Id = @Id", + "SELECT first_name FROM users WHERE id = @Id", [new { Id = user2Id.ToString() }] ); var lastName = Connection.ExecuteScalar( - "SELECT LastName FROM Users WHERE Id = @Id", + "SELECT last_name FROM users WHERE id = @Id", [new { Id = user2Id.ToString() }] ); var title = Connection.ExecuteScalar( - "SELECT Title FROM Users WHERE Id = @Id", + "SELECT title FROM users WHERE id = @Id", [new { Id = user2Id.ToString() }] ); var locale = Connection.ExecuteScalar( - "SELECT Locale FROM Users WHERE Id = @Id", + "SELECT locale FROM users WHERE id = @Id", [new { Id = user2Id.ToString() }] ); var emailConfirmed = Connection.ExecuteScalar( - "SELECT EmailConfirmed FROM Users WHERE Id = @Id", + "SELECT email_confirmed FROM users WHERE id = @Id", [new { Id = user2Id.ToString() }] ); @@ -312,32 +316,33 @@ public async Task SwitchTenant_WhenSessionAlreadyRevoked_ShouldReturnUnauthorize var tenant2Id = TenantId.NewId(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", 
Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); InsertSubscription(tenant2Id); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -360,25 +365,25 @@ public async Task SwitchTenant_WhenSessionAlreadyRevoked_ShouldReturnUnauthorize private void InsertSubscription(TenantId tenantId) { - Connection.Insert("Subscriptions", [ - ("TenantId", tenantId.Value), - ("Id", SubscriptionId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Plan", nameof(SubscriptionPlan.Basis)), - ("ScheduledPlan", null), - ("StripeCustomerId", null), - ("StripeSubscriptionId", null), - ("CurrentPriceAmount", null), - ("CurrentPriceCurrency", null), - ("CurrentPeriodEnd", null), - 
("CancelAtPeriodEnd", false), - ("FirstPaymentFailedAt", null), - ("CancellationReason", null), - ("CancellationFeedback", null), - ("PaymentTransactions", "[]"), - ("PaymentMethod", null), - ("BillingInfo", null) + Connection.Insert("subscriptions", [ + ("tenant_id", tenantId.Value), + ("id", SubscriptionId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("plan", nameof(SubscriptionPlan.Basis)), + ("scheduled_plan", null), + ("stripe_customer_id", null), + ("stripe_subscription_id", null), + ("current_price_amount", null), + ("current_price_currency", null), + ("current_period_end", null), + ("cancel_at_period_end", false), + ("first_payment_failed_at", null), + ("cancellation_reason", null), + ("cancellation_feedback", null), + ("payment_transactions", "[]"), + ("payment_method", null), + ("billing_info", null) ] ); } diff --git a/application/account/Tests/Billing/ConfirmPaymentMethodSetupTests.cs b/application/account/Tests/Billing/ConfirmPaymentMethodSetupTests.cs index b16aa91ca6..b6fd6d6f37 100644 --- a/application/account/Tests/Billing/ConfirmPaymentMethodSetupTests.cs +++ b/application/account/Tests/Billing/ConfirmPaymentMethodSetupTests.cs @@ -16,11 +16,11 @@ public sealed class ConfirmPaymentMethodSetupTests : EndpointBaseTest { protected override void Dispose(bool disposing) @@ -23,11 +24,11 @@ protected override void Dispose(bool disposing) public async Task RetryPendingInvoicePayment_WhenOpenInvoicePaid_ShouldReturnPaid() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", 
"sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); MockStripeClient.SimulateOpenInvoice = true; @@ -49,11 +50,11 @@ public async Task RetryPendingInvoicePayment_WhenOpenInvoicePaid_ShouldReturnPai public async Task RetryPendingInvoicePayment_WhenNoOpenInvoice_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); @@ -78,8 +79,8 @@ public async Task RetryPendingInvoicePayment_WhenNonOwner_ShouldReturnForbidden( public async Task RetryPendingInvoicePayment_WhenNoStripeSubscription_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("StripeCustomerId", "cus_test_123") + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("stripe_customer_id", "cus_test_123") ] ); diff --git a/application/account/Tests/Billing/UpdateBillingInfoTests.cs b/application/account/Tests/Billing/UpdateBillingInfoTests.cs index 2b2569815d..f02e634c4c 100644 --- a/application/account/Tests/Billing/UpdateBillingInfoTests.cs +++ b/application/account/Tests/Billing/UpdateBillingInfoTests.cs @@ -17,11 +17,11 @@ public sealed class UpdateBillingInfoTests : EndpointBaseTest public async Task UpdateBillingInfo_WhenValid_ShouldSucceed() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - 
("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new UpdateBillingInfoCommand("Test Organization", "Vestergade 12", "1456", "Copenhagen", null, "DK", "billing@example.com", null); @@ -37,11 +37,11 @@ public async Task UpdateBillingInfo_WhenValid_ShouldSucceed() public async Task UpdateBillingInfo_WhenMultiLineAddress_ShouldSplitIntoLine1AndLine2() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new UpdateBillingInfoCommand("Test Organization", "Vestergade 12\nFloor 3", "1456", "Copenhagen", null, "DK", "billing@example.com", null); @@ -70,11 +70,11 @@ public async Task UpdateBillingInfo_WhenNoStripeCustomer_ShouldCreateCustomerAnd public async Task UpdateBillingInfo_WhenNonOwner_ShouldReturnForbidden() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", 
nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new UpdateBillingInfoCommand("Test Organization", "Vestergade 12", "1456", "Copenhagen", null, "DK", "billing@example.com", null); @@ -91,11 +91,11 @@ public async Task UpdateBillingInfo_WhenNonOwner_ShouldReturnForbidden() public async Task UpdateBillingInfo_WhenInvalidTaxId_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new UpdateBillingInfoCommand("Test Organization", "Vestergade 12", "1456", "Copenhagen", null, "DK", "billing@example.com", "INVALID"); diff --git a/application/account/Tests/EmailAuthentication/CompleteEmailLoginTests.cs b/application/account/Tests/EmailAuthentication/CompleteEmailLoginTests.cs index 067ec93477..88ff12234b 100644 --- a/application/account/Tests/EmailAuthentication/CompleteEmailLoginTests.cs +++ b/application/account/Tests/EmailAuthentication/CompleteEmailLoginTests.cs @@ -36,7 +36,7 @@ public async Task CompleteEmailLogin_WhenValid_ShouldCompleteEmailLoginAndCreate // Assert await response.ShouldBeSuccessfulPostRequest(hasLocation: false); var updatedEmailLoginCount = Connection.ExecuteScalar( - "SELECT COUNT(*) FROM EmailLogins WHERE Id = @id AND Completed = 1", [new { id = emailLoginId.ToString() }] + "SELECT COUNT(*) FROM email_logins WHERE id = @id AND completed = 
1", [new { id = emailLoginId.ToString() }] ); updatedEmailLoginCount.Should().Be(1); @@ -82,7 +82,7 @@ public async Task CompleteEmailLogin_WhenInvalidOneTimePassword_ShouldReturnBadR // Assert await response.ShouldHaveErrorStatusCode(HttpStatusCode.BadRequest, "The code is wrong or no longer valid."); - var updatedRetryCount = Connection.ExecuteScalar("SELECT RetryCount FROM EmailLogins WHERE Id = @id", [new { id = emailLoginId.ToString() }]); + var updatedRetryCount = Connection.ExecuteScalar("SELECT retry_count FROM email_logins WHERE id = @id", [new { id = emailLoginId.ToString() }]); updatedRetryCount.Should().Be(1); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); @@ -127,7 +127,7 @@ public async Task CompleteEmailLogin_WhenRetryCountExceeded_ShouldReturnForbidde await response.ShouldHaveErrorStatusCode(HttpStatusCode.Forbidden, "Too many attempts, please request a new code."); var updatedRetryCount = Connection.ExecuteScalar( - "SELECT RetryCount FROM EmailLogins WHERE Id = @id", [new { id = emailLoginId.ToString() }] + "SELECT retry_count FROM email_logins WHERE id = @id", [new { id = emailLoginId.ToString() }] ); updatedRetryCount.Should().Be(4); @@ -146,16 +146,16 @@ public async Task CompleteEmailLogin_WhenEmailLoginExpired_ShouldReturnBadReques // Arrange var emailLoginId = EmailLoginId.NewId(); - Connection.Insert("EmailLogins", [ - ("Id", emailLoginId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Owner.Email), - ("Type", nameof(EmailLoginType.Login)), - ("OneTimePasswordHash", new PasswordHasher().HashPassword(this, CorrectOneTimePassword)), - ("RetryCount", 0), - ("ResendCount", 0), - ("Completed", false) + Connection.Insert("email_logins", [ + ("id", emailLoginId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Owner.Email), + ("type", nameof(EmailLoginType.Login)), + 
("one_time_password_hash", new PasswordHasher().HashPassword(this, CorrectOneTimePassword)), + ("retry_count", 0), + ("resend_count", 0), + ("completed", false) ] ); @@ -176,7 +176,7 @@ public async Task CompleteEmailLogin_WhenEmailLoginExpired_ShouldReturnBadReques public async Task CompleteEmailLogin_WhenUserInviteCompleted_ShouldTrackUserInviteAcceptedEvent() { // Arrange - Connection.Update("Tenants", "Id", DatabaseSeeder.Tenant1.Id.ToString(), [("Name", "Test Company")]); + Connection.Update("tenants", "id", DatabaseSeeder.Tenant1.Id.ToString(), [("name", "Test Company")]); var email = Faker.Internet.UniqueEmail(); var inviteUserCommand = new InviteUserCommand(email); @@ -193,7 +193,7 @@ public async Task CompleteEmailLogin_WhenUserInviteCompleted_ShouldTrackUserInvi // Assert await response.ShouldBeSuccessfulPostRequest(hasLocation: false); Connection.ExecuteScalar( - "SELECT COUNT(*) FROM Users WHERE TenantId = @tenantId AND Email = @email AND EmailConfirmed = 1", + "SELECT COUNT(*) FROM users WHERE tenant_id = @tenantId AND email = @email AND email_confirmed = 1", [new { tenantId = DatabaseSeeder.Tenant1.Id.ToString(), email = email.ToLower() }] ).Should().Be(1); @@ -212,52 +212,53 @@ public async Task CompleteEmailLogin_WithValidPreferredTenant_ShouldLoginToPrefe var tenant2Id = TenantId.NewId(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Subscriptions", [ - ("TenantId", tenant2Id.Value), - ("Id", 
SubscriptionId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Plan", nameof(SubscriptionPlan.Basis)), - ("ScheduledPlan", null), - ("StripeCustomerId", null), - ("StripeSubscriptionId", null), - ("CurrentPriceAmount", null), - ("CurrentPriceCurrency", null), - ("CurrentPeriodEnd", null), - ("CancelAtPeriodEnd", false), - ("FirstPaymentFailedAt", null), - ("CancellationReason", null), - ("CancellationFeedback", null), - ("PaymentTransactions", "[]"), - ("PaymentMethod", null), - ("BillingInfo", null) + Connection.Insert("subscriptions", [ + ("tenant_id", tenant2Id.Value), + ("id", SubscriptionId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("plan", nameof(SubscriptionPlan.Basis)), + ("scheduled_plan", null), + ("stripe_customer_id", null), + ("stripe_subscription_id", null), + ("current_price_amount", null), + ("current_price_currency", null), + ("current_period_end", null), + ("cancel_at_period_end", false), + ("first_payment_failed_at", null), + ("cancellation_reason", null), + ("cancellation_feedback", null), + ("payment_transactions", "[]"), + ("payment_method", null), + ("billing_info", null) ] ); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Owner.Email), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Owner)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Owner.Email), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", 
Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Owner)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -310,13 +311,14 @@ public async Task CompleteEmailLogin_WithPreferredTenantUserDoesNotHaveAccess_Sh // Arrange var tenant2Id = TenantId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); diff --git a/application/account/Tests/EmailAuthentication/StartEmailLoginTests.cs b/application/account/Tests/EmailAuthentication/StartEmailLoginTests.cs index 73fe02a557..7a0e00a84d 100644 --- a/application/account/Tests/EmailAuthentication/StartEmailLoginTests.cs +++ b/application/account/Tests/EmailAuthentication/StartEmailLoginTests.cs @@ -128,16 +128,16 @@ public async Task StartEmailLogin_WhenTooManyAttempts_ShouldReturnTooManyRequest for (var i = 1; i <= 4; i++) { var oneTimePasswordHash = new PasswordHasher().HashPassword(this, OneTimePasswordHelper.GenerateOneTimePassword(6)); - Connection.Insert("EmailLogins", [ - ("Id", EmailLoginId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", email.ToLower()), - ("Type", nameof(EmailLoginType.Login)), - ("OneTimePasswordHash", oneTimePasswordHash), - ("RetryCount", 0), - ("ResendCount", 0), - ("Completed", false) + Connection.Insert("email_logins", [ + ("id", EmailLoginId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", 
null), + ("email", email.ToLower()), + ("type", nameof(EmailLoginType.Login)), + ("one_time_password_hash", oneTimePasswordHash), + ("retry_count", 0), + ("resend_count", 0), + ("completed", false) ] ); } @@ -159,21 +159,21 @@ public async Task StartEmailLogin_WhenUserIsSoftDeleted_ShouldReturnFakeEmailLog { // Arrange var email = Faker.Internet.UniqueEmail(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-30)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", email.ToLower()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-30)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", email.ToLower()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/Tests/EndpointBaseTest.cs b/application/account/Tests/EndpointBaseTest.cs index c09e9d3c64..ad0ee0655c 100644 --- a/application/account/Tests/EndpointBaseTest.cs +++ b/application/account/Tests/EndpointBaseTest.cs @@ -55,18 +55,18 @@ protected EndpointBaseTest() Connection = new SqliteConnection($"Data Source=TestDb_{Guid.NewGuid():N};Mode=Memory;Cache=Shared"); 
Connection.Open(); - // Configure SQLite to behave more like SQL Server + // Configure SQLite to behave more like PostgreSQL using (var command = Connection.CreateCommand()) { - // Enable foreign key constraints (SQL Server has this by default) + // Enable foreign key constraints (PostgreSQL has this by default) command.CommandText = "PRAGMA foreign_keys = ON;"; command.ExecuteNonQuery(); - // Enable recursive triggers (SQL Server supports nested triggers) + // Enable recursive triggers (PostgreSQL supports nested triggers) command.CommandText = "PRAGMA recursive_triggers = ON;"; command.ExecuteNonQuery(); - // Enforce CHECK constraints (SQL Server enforces these by default) + // Enforce CHECK constraints (PostgreSQL enforces these by default) command.CommandText = "PRAGMA ignore_check_constraints = OFF;"; command.ExecuteNonQuery(); @@ -75,7 +75,7 @@ protected EndpointBaseTest() command.ExecuteNonQuery(); } - Services.AddDbContext(options => { options.UseSqlite(Connection); }); + Services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); Services.AddAccountServices(); @@ -109,7 +109,7 @@ protected EndpointBaseTest() { // Replace the default DbContext in the WebApplication to use an in-memory SQLite database services.Remove(services.Single(d => d.ServiceType == typeof(IDbContextOptionsConfiguration))); - services.AddDbContext(options => { options.UseSqlite(Connection); }); + services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); TelemetryEventsCollectorSpy = new TelemetryEventsCollectorSpy(new TelemetryEventsCollector()); services.AddScoped(_ => TelemetryEventsCollectorSpy); diff --git a/application/account/Tests/ExternalAuthentication/CompleteExternalLoginTests.cs b/application/account/Tests/ExternalAuthentication/CompleteExternalLoginTests.cs index 751a27769f..680400be55 100644 --- a/application/account/Tests/ExternalAuthentication/CompleteExternalLoginTests.cs +++ 
b/application/account/Tests/ExternalAuthentication/CompleteExternalLoginTests.cs @@ -164,7 +164,7 @@ public async Task CompleteExternalLogin_WhenExpired_ShouldRedirectToErrorPage() response.Headers.Location!.ToString().Should().Contain("/error?error=session_expired"); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.LoginExpired)); @@ -191,7 +191,7 @@ public async Task CompleteExternalLogin_WhenNonceMismatch_ShouldRedirectToErrorP response.Headers.Location!.ToString().Should().Contain("/error?error=authentication_failed"); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.NonceMismatch)); @@ -204,20 +204,20 @@ public async Task CompleteExternalLogin_WhenNonceMismatch_ShouldRedirectToErrorP public async Task CompleteExternalLogin_WhenUserHasNoExternalIdentity_ShouldLinkIdentityAndCreateSession() { // Arrange - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", MockOAuthProvider.MockEmail), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", 
MockOAuthProvider.MockEmail), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); var (callbackUrl, cookies) = await StartLoginFlow(); @@ -231,7 +231,7 @@ public async Task CompleteExternalLogin_WhenUserHasNoExternalIdentity_ShouldLink response.Headers.Location!.ToString().Should().Be("/"); var externalIdentities = Connection.ExecuteScalar( - "SELECT ExternalIdentities FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT external_identities FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); externalIdentities.Should().Contain(MockOAuthProvider.MockProviderUserId); @@ -245,20 +245,20 @@ public async Task CompleteExternalLogin_WhenUserHasNoExternalIdentity_ShouldLink public async Task CompleteExternalLogin_WhenInvitedUserHasNoName_ShouldUpdateNameFromGoogleProfile() { // Arrange - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", MockOAuthProvider.MockEmail), - ("EmailConfirmed", false), - ("FirstName", null), - ("LastName", null), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", MockOAuthProvider.MockEmail), + ("email_confirmed", false), + ("first_name", null), + ("last_name", null), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), 
+ ("external_identities", "[]") ] ); var (callbackUrl, cookies) = await StartLoginFlow(); @@ -272,10 +272,10 @@ public async Task CompleteExternalLogin_WhenInvitedUserHasNoName_ShouldUpdateNam response.Headers.Location!.ToString().Should().Be("/"); var firstName = Connection.ExecuteScalar( - "SELECT FirstName FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT first_name FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); var lastName = Connection.ExecuteScalar( - "SELECT LastName FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT last_name FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); firstName.Should().Be(MockOAuthProvider.MockFirstName); lastName.Should().Be(MockOAuthProvider.MockLastName); @@ -287,20 +287,20 @@ public async Task CompleteExternalLogin_WhenUserAlreadyHasName_ShouldNotOverwrit // Arrange var existingFirstName = Faker.Name.FirstName(); var existingLastName = Faker.Name.LastName(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", MockOAuthProvider.MockEmail), - ("EmailConfirmed", true), - ("FirstName", existingFirstName), - ("LastName", existingLastName), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", MockOAuthProvider.MockEmail), + ("email_confirmed", true), + ("first_name", existingFirstName), + ("last_name", existingLastName), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + 
("locale", "en-US"), + ("external_identities", "[]") ] ); var (callbackUrl, cookies) = await StartLoginFlow(); @@ -314,10 +314,10 @@ public async Task CompleteExternalLogin_WhenUserAlreadyHasName_ShouldNotOverwrit response.Headers.Location!.ToString().Should().Be("/"); var firstName = Connection.ExecuteScalar( - "SELECT FirstName FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT first_name FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); var lastName = Connection.ExecuteScalar( - "SELECT LastName FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT last_name FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); firstName.Should().Be(existingFirstName); lastName.Should().Be(existingLastName); @@ -371,7 +371,7 @@ public async Task CompleteExternalLogin_WhenValid_ShouldMarkCompletedInDatabase( // Assert var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.Success)); } @@ -389,7 +389,7 @@ public async Task CompleteExternalLogin_WhenUserNotFound_ShouldMarkFailedInDatab // Assert var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.UserNotFound)); } @@ -401,70 +401,71 @@ public async Task CompleteExternalLogin_WithValidPreferredTenant_ShouldLoginToPr var tenant2Id = TenantId.NewId(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - 
("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Subscriptions", [ - ("TenantId", tenant2Id.Value), - ("Id", SubscriptionId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Plan", nameof(SubscriptionPlan.Basis)), - ("ScheduledPlan", null), - ("StripeCustomerId", null), - ("StripeSubscriptionId", null), - ("CurrentPriceAmount", null), - ("CurrentPriceCurrency", null), - ("CurrentPeriodEnd", null), - ("CancelAtPeriodEnd", false), - ("FirstPaymentFailedAt", null), - ("CancellationReason", null), - ("CancellationFeedback", null), - ("PaymentTransactions", "[]"), - ("PaymentMethod", null), - ("BillingInfo", null) + Connection.Insert("subscriptions", [ + ("tenant_id", tenant2Id.Value), + ("id", SubscriptionId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("plan", nameof(SubscriptionPlan.Basis)), + ("scheduled_plan", null), + ("stripe_customer_id", null), + ("stripe_subscription_id", null), + ("current_price_amount", null), + ("current_price_currency", null), + ("current_period_end", null), + ("cancel_at_period_end", false), + ("first_payment_failed_at", null), + ("cancellation_reason", null), + ("cancellation_feedback", null), + ("payment_transactions", "[]"), + ("payment_method", null), + ("billing_info", null) ] ); var identities = JsonSerializer.Serialize(new[] { new { Provider = nameof(ExternalProviderType.Google), ProviderUserId = MockOAuthProvider.MockProviderUserId } }); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", 
TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", MockOAuthProvider.MockEmail), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", identities) + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", MockOAuthProvider.MockEmail), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", identities) ] ); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", MockOAuthProvider.MockEmail), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Owner)), - ("Locale", "en-US"), - ("ExternalIdentities", identities) + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", MockOAuthProvider.MockEmail), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Owner)), + ("locale", "en-US"), + ("external_identities", identities) ] ); @@ -479,7 +480,7 @@ public async Task CompleteExternalLogin_WithValidPreferredTenant_ShouldLoginToPr 
response.Headers.Location!.ToString().Should().Be("/"); var sessionTenantId = Connection.ExecuteScalar( - "SELECT TenantId FROM Sessions WHERE UserId = @userId ORDER BY CreatedAt DESC LIMIT 1", [new { userId = user2Id.ToString() }] + "SELECT tenant_id FROM sessions WHERE user_id = @userId ORDER BY created_at DESC LIMIT 1", [new { userId = user2Id.ToString() }] ); sessionTenantId.Should().Be(tenant2Id.Value); @@ -524,7 +525,7 @@ public async Task CompleteExternalLogin_WhenFlowIdMismatch_ShouldRedirectToError var externalLoginId = GetExternalLoginIdFromUrl(callbackUrl1); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.FlowIdMismatch)); @@ -578,13 +579,14 @@ public async Task CompleteExternalLogin_WithPreferredTenantUserDoesNotHaveAccess // Arrange var tenant2Id = TenantId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); diff --git a/application/account/Tests/ExternalAuthentication/CompleteExternalSignupTests.cs b/application/account/Tests/ExternalAuthentication/CompleteExternalSignupTests.cs index cbdc124d18..617ca77c67 100644 --- a/application/account/Tests/ExternalAuthentication/CompleteExternalSignupTests.cs +++ b/application/account/Tests/ExternalAuthentication/CompleteExternalSignupTests.cs @@ -24,11 +24,11 @@ public async Task 
CompleteExternalSignup_WhenValid_ShouldCreateTenantUserAndSess response.Headers.Location!.ToString().Should().Be("/onboarding"); var userCount = Connection.ExecuteScalar( - "SELECT COUNT(*) FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT COUNT(*) FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); userCount.Should().Be(1); - var tenantCount = Connection.ExecuteScalar("SELECT COUNT(*) FROM Tenants", []); + var tenantCount = Connection.ExecuteScalar("SELECT COUNT(*) FROM tenants", []); tenantCount.Should().BeGreaterThan(1); TelemetryEventsCollectorSpy.CollectedEvents.Should().Contain(e => e.GetType().Name == "TenantCreated"); @@ -55,7 +55,7 @@ public async Task CompleteExternalSignup_WhenUserAlreadyExists_ShouldRedirectToE var externalLoginId = GetExternalLoginIdFromUrl(callbackUrl); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.AccountAlreadyExists)); @@ -133,7 +133,7 @@ public async Task CompleteExternalSignup_WhenExpired_ShouldRedirectToErrorPage() response.Headers.Location!.ToString().Should().Contain("/error?error=session_expired"); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.LoginExpired)); @@ -159,7 +159,7 @@ public async Task CompleteExternalSignup_WhenNonceMismatch_ShouldRedirectToError response.Headers.Location!.ToString().Should().Contain("/error?error=authentication_failed"); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT 
login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.NonceMismatch)); @@ -203,7 +203,7 @@ public async Task CompleteExternalSignup_WhenFlowIdMismatch_ShouldRedirectToErro var externalLoginId = GetExternalLoginIdFromUrl(callbackUrl1); var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.FlowIdMismatch)); @@ -280,7 +280,7 @@ public async Task CompleteExternalSignup_WhenValid_ShouldMarkCompletedInDatabase // Assert var loginResult = Connection.ExecuteScalar( - "SELECT LoginResult FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT login_result FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginResult.Should().Be(nameof(ExternalLoginResult.Success)); } @@ -297,7 +297,7 @@ public async Task CompleteExternalSignup_WhenValid_ShouldLinkExternalIdentity() // Assert var externalIdentities = Connection.ExecuteScalar( - "SELECT ExternalIdentities FROM Users WHERE Email = @email", [new { email = MockOAuthProvider.MockEmail }] + "SELECT external_identities FROM users WHERE email = @email", [new { email = MockOAuthProvider.MockEmail }] ); externalIdentities.Should().Contain(MockOAuthProvider.MockProviderUserId); } diff --git a/application/account/Tests/ExternalAuthentication/ExternalAuthenticationTestBase.cs b/application/account/Tests/ExternalAuthentication/ExternalAuthenticationTestBase.cs index 3adaf270ea..f97b072571 100644 --- a/application/account/Tests/ExternalAuthentication/ExternalAuthenticationTestBase.cs +++ b/application/account/Tests/ExternalAuthentication/ExternalAuthenticationTestBase.cs @@ -68,7 +68,7 @@ protected ExternalAuthenticationTestBase() var services = new ServiceCollection(); services.AddLogging(); 
services.AddTransient(); - services.AddDbContext(options => { options.UseSqlite(Connection); }); + services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); services.AddAccountServices(); TelemetryEventsCollectorSpy = new TelemetryEventsCollectorSpy(new TelemetryEventsCollector()); @@ -104,7 +104,7 @@ protected ExternalAuthenticationTestBase() builder.ConfigureTestServices(testServices => { testServices.Remove(testServices.Single(d => d.ServiceType == typeof(IDbContextOptionsConfiguration))); - testServices.AddDbContext(options => { options.UseSqlite(Connection); }); + testServices.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); TelemetryEventsCollectorSpy = new TelemetryEventsCollectorSpy(new TelemetryEventsCollector()); testServices.AddScoped(_ => TelemetryEventsCollectorSpy); @@ -206,32 +206,32 @@ protected string GetExternalLoginIdFromUrl(string url) protected void ExpireExternalLogin(string externalLoginId) { var expiredTime = TimeProvider.GetUtcNow().AddSeconds(-(ExternalLogin.ValidForSeconds + 1)); - Connection.Update("ExternalLogins", "Id", externalLoginId, [("CreatedAt", expiredTime)]); + Connection.Update("external_logins", "id", externalLoginId, [("created_at", expiredTime)]); } protected void TamperWithNonce(string externalLoginId) { - Connection.Update("ExternalLogins", "Id", externalLoginId, [("Nonce", "tampered-nonce-value")]); + Connection.Update("external_logins", "id", externalLoginId, [("nonce", "tampered-nonce-value")]); } protected UserId InsertUserWithExternalIdentity(string email, ExternalProviderType providerType, string providerUserId) { var userId = UserId.NewId(); var identities = JsonSerializer.Serialize(new[] { new { Provider = providerType.ToString(), ProviderUserId = providerUserId } }); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - 
("ModifiedAt", null), - ("Email", email.ToLower()), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", identities) + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", userId.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", email.ToLower()), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", identities) ] ); return userId; diff --git a/application/account/Tests/ExternalAuthentication/StartExternalLoginTests.cs b/application/account/Tests/ExternalAuthentication/StartExternalLoginTests.cs index e93d1eefe1..03bd0136f5 100644 --- a/application/account/Tests/ExternalAuthentication/StartExternalLoginTests.cs +++ b/application/account/Tests/ExternalAuthentication/StartExternalLoginTests.cs @@ -24,10 +24,10 @@ public async Task StartExternalLogin_WhenValidProvider_ShouldRedirectToAuthoriza location.Should().Contain("state="); var externalLoginId = GetExternalLoginIdFromResponse(response); - Connection.RowExists("ExternalLogins", externalLoginId).Should().BeTrue(); + Connection.RowExists("external_logins", externalLoginId).Should().BeTrue(); var loginType = Connection.ExecuteScalar( - "SELECT Type FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT type FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginType.Should().Be(nameof(ExternalLoginType.Login)); diff --git a/application/account/Tests/ExternalAuthentication/StartExternalSignupTests.cs 
b/application/account/Tests/ExternalAuthentication/StartExternalSignupTests.cs index 97c8afb62c..4a9ac84c20 100644 --- a/application/account/Tests/ExternalAuthentication/StartExternalSignupTests.cs +++ b/application/account/Tests/ExternalAuthentication/StartExternalSignupTests.cs @@ -24,10 +24,10 @@ public async Task StartExternalSignup_WhenValidProvider_ShouldRedirectToAuthoriz location.Should().Contain("state="); var externalLoginId = GetExternalLoginIdFromResponse(response); - Connection.RowExists("ExternalLogins", externalLoginId).Should().BeTrue(); + Connection.RowExists("external_logins", externalLoginId).Should().BeTrue(); var loginType = Connection.ExecuteScalar( - "SELECT Type FROM ExternalLogins WHERE Id = @id", [new { id = externalLoginId }] + "SELECT type FROM external_logins WHERE id = @id", [new { id = externalLoginId }] ); loginType.Should().Be(nameof(ExternalLoginType.Signup)); diff --git a/application/account/Tests/Signups/CompleteEmailSignupTests.cs b/application/account/Tests/Signups/CompleteEmailSignupTests.cs index 5a38f62376..ecd59c7a1f 100644 --- a/application/account/Tests/Signups/CompleteEmailSignupTests.cs +++ b/application/account/Tests/Signups/CompleteEmailSignupTests.cs @@ -40,7 +40,7 @@ public async Task CompleteSignup_WhenValid_ShouldCreateTenantAndOwnerUser() // Assert await response.ShouldBeSuccessfulPostRequest(hasLocation: false); - Connection.ExecuteScalar("SELECT COUNT(*) FROM Users WHERE Email = @email", [new { email = email.ToLower() }]).Should().Be(1); + Connection.ExecuteScalar("SELECT COUNT(*) FROM users WHERE email = @email", [new { email = email.ToLower() }]).Should().Be(1); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(5); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("SignupStarted"); @@ -141,16 +141,16 @@ public async Task CompleteSignup_WhenSignupExpired_ShouldReturnBadRequest() var email = Faker.Internet.UniqueEmail(); var emailLoginId = EmailLoginId.NewId(); - 
Connection.Insert("EmailLogins", [ - ("Id", emailLoginId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", email), - ("Type", nameof(EmailLoginType.Signup)), - ("OneTimePasswordHash", new PasswordHasher().HashPassword(this, CorrectOneTimePassword)), - ("RetryCount", 0), - ("ResendCount", 0), - ("Completed", false) + Connection.Insert("email_logins", [ + ("id", emailLoginId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("email", email), + ("type", nameof(EmailLoginType.Signup)), + ("one_time_password_hash", new PasswordHasher().HashPassword(this, CorrectOneTimePassword)), + ("retry_count", 0), + ("resend_count", 0), + ("completed", false) ] ); diff --git a/application/account/Tests/Signups/StartEmailSignupTests.cs b/application/account/Tests/Signups/StartEmailSignupTests.cs index b20afe79d5..9f316d0fa9 100644 --- a/application/account/Tests/Signups/StartEmailSignupTests.cs +++ b/application/account/Tests/Signups/StartEmailSignupTests.cs @@ -76,16 +76,16 @@ public async Task StartSignup_WhenTooManyAttempts_ShouldReturnTooManyRequests() for (var i = 1; i <= 4; i++) { var oneTimePasswordHash = new PasswordHasher().HashPassword(this, OneTimePasswordHelper.GenerateOneTimePassword(6)); - Connection.Insert("EmailLogins", [ - ("Id", EmailLoginId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", email), - ("Type", nameof(EmailLoginType.Signup)), - ("OneTimePasswordHash", oneTimePasswordHash), - ("RetryCount", 0), - ("ResendCount", 0), - ("Completed", false) + Connection.Insert("email_logins", [ + ("id", EmailLoginId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("email", email), + ("type", nameof(EmailLoginType.Signup)), + ("one_time_password_hash", oneTimePasswordHash), + ("retry_count", 0), + ("resend_count", 0), + ("completed", false) ] 
); } diff --git a/application/account/Tests/Subscriptions/AcknowledgeStripeWebhookTests.cs b/application/account/Tests/Subscriptions/AcknowledgeStripeWebhookTests.cs index 579a6f8148..057126a71e 100644 --- a/application/account/Tests/Subscriptions/AcknowledgeStripeWebhookTests.cs +++ b/application/account/Tests/Subscriptions/AcknowledgeStripeWebhookTests.cs @@ -11,6 +11,7 @@ namespace Account.Tests.Subscriptions; +[Collection("StripeTests")] public sealed class AcknowledgeStripeWebhookTests : EndpointBaseTest { private const string WebhookUrl = "/api/account/subscriptions/stripe-webhook"; @@ -24,15 +25,15 @@ protected override void Dispose(bool disposing) private void SetupSubscription(string? stripeCustomerId = MockStripeClient.MockCustomerId, string? stripeSubscriptionId = MockStripeClient.MockSubscriptionId, string plan = nameof(SubscriptionPlan.Standard), DateTimeOffset? firstPaymentFailedAt = null, string? cancellationReason = null) { var hasStripeSubscription = stripeSubscriptionId is not null; - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", plan), - ("StripeCustomerId", stripeCustomerId), - ("StripeSubscriptionId", stripeSubscriptionId), - ("CurrentPriceAmount", hasStripeSubscription ? 29.99m : null), - ("CurrentPriceCurrency", hasStripeSubscription ? "USD" : null), - ("CurrentPeriodEnd", hasStripeSubscription ? TimeProvider.GetUtcNow().AddDays(30) : null), - ("FirstPaymentFailedAt", firstPaymentFailedAt), - ("CancellationReason", cancellationReason) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", plan), + ("stripe_customer_id", stripeCustomerId), + ("stripe_subscription_id", stripeSubscriptionId), + ("current_price_amount", hasStripeSubscription ? 29.99m : null), + ("current_price_currency", hasStripeSubscription ? "USD" : null), + ("current_period_end", hasStripeSubscription ? 
TimeProvider.GetUtcNow().AddDays(30) : null), + ("first_payment_failed_at", firstPaymentFailedAt), + ("cancellation_reason", cancellationReason) ] ); } @@ -63,18 +64,18 @@ public async Task AcknowledgeStripeWebhook_WhenDuplicateEvent_ShouldReturnSucces // Arrange SetupSubscription(); var eventId = $"{MockStripeClient.MockWebhookEventId}_duplicate"; - Connection.Insert("StripeEvents", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.Value), - ("Id", eventId), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("EventType", "checkout.session.completed"), - ("Status", nameof(StripeEventStatus.Processed)), - ("ProcessedAt", TimeProvider.GetUtcNow()), - ("StripeCustomerId", MockStripeClient.MockCustomerId), - ("StripeSubscriptionId", MockStripeClient.MockSubscriptionId), - ("Payload", null), - ("Error", null) + Connection.Insert("stripe_events", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.Value), + ("id", eventId), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("event_type", "checkout.session.completed"), + ("status", nameof(StripeEventStatus.Processed)), + ("processed_at", TimeProvider.GetUtcNow()), + ("stripe_customer_id", MockStripeClient.MockCustomerId), + ("stripe_subscription_id", MockStripeClient.MockSubscriptionId), + ("payload", null), + ("error", null) ] ); TelemetryEventsCollectorSpy.Reset(); @@ -130,7 +131,7 @@ public async Task AcknowledgeStripeWebhook_WhenPaymentSucceeded_ShouldClearPayme // Assert response.EnsureSuccessStatusCode(); - var firstPaymentFailed = Connection.ExecuteScalar("SELECT FirstPaymentFailedAt FROM Subscriptions WHERE TenantId = @tenantId", [new { tenantId = DatabaseSeeder.Tenant1.Id.Value }]); + var firstPaymentFailed = Connection.ExecuteScalar("SELECT first_payment_failed_at FROM subscriptions WHERE tenant_id = @tenantId", [new { tenantId = DatabaseSeeder.Tenant1.Id.Value }]); firstPaymentFailed.Should().BeNullOrEmpty(); } @@ -153,10 +154,10 @@ public async Task 
AcknowledgeStripeWebhook_WhenFirstPaymentFailed_ShouldSetFailu // Assert response.EnsureSuccessStatusCode(); - var firstPaymentFailed = Connection.ExecuteScalar("SELECT FirstPaymentFailedAt FROM Subscriptions WHERE TenantId = @tenantId", [new { tenantId = DatabaseSeeder.Tenant1.Id.Value }]); + var firstPaymentFailed = Connection.ExecuteScalar("SELECT first_payment_failed_at FROM subscriptions WHERE tenant_id = @tenantId", [new { tenantId = DatabaseSeeder.Tenant1.Id.Value }]); firstPaymentFailed.Should().NotBeNullOrEmpty(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Active)); } @@ -180,7 +181,7 @@ public async Task AcknowledgeStripeWebhook_WhenSubsequentPaymentFailed_ShouldNot // Assert response.EnsureSuccessStatusCode(); - var firstPaymentFailed = Connection.ExecuteScalar("SELECT FirstPaymentFailedAt FROM Subscriptions WHERE TenantId = @tenantId", [new { tenantId = DatabaseSeeder.Tenant1.Id.Value }]); + var firstPaymentFailed = Connection.ExecuteScalar("SELECT first_payment_failed_at FROM subscriptions WHERE tenant_id = @tenantId", [new { tenantId = DatabaseSeeder.Tenant1.Id.Value }]); firstPaymentFailed.Should().NotBeNullOrEmpty(); } @@ -204,10 +205,10 @@ public async Task AcknowledgeStripeWebhook_WhenSubscriptionDeletedInvoluntarily_ // Assert response.EnsureSuccessStatusCode(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Suspended)); - var suspensionReason = Connection.ExecuteScalar("SELECT SuspensionReason FROM Tenants 
WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var suspensionReason = Connection.ExecuteScalar("SELECT suspension_reason FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); suspensionReason.Should().Be(nameof(SuspensionReason.PaymentFailed)); } @@ -230,7 +231,7 @@ public async Task AcknowledgeStripeWebhook_WhenSubscriptionDeletedVoluntarily_Sh // Assert response.EnsureSuccessStatusCode(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Active)); } @@ -239,7 +240,7 @@ public async Task AcknowledgeStripeWebhook_WhenCheckoutSessionCompleted_ShouldAc { // Arrange SetupSubscription(stripeSubscriptionId: null, plan: nameof(SubscriptionPlan.Basis)); - Connection.Update("Tenants", "Id", DatabaseSeeder.Tenant1.Id.Value, [("State", nameof(TenantState.Suspended)), ("SuspensionReason", nameof(SuspensionReason.PaymentFailed))]); + Connection.Update("tenants", "id", DatabaseSeeder.Tenant1.Id.Value, [("state", nameof(TenantState.Suspended)), ("suspension_reason", nameof(SuspensionReason.PaymentFailed))]); TelemetryEventsCollectorSpy.Reset(); // Act @@ -253,10 +254,10 @@ public async Task AcknowledgeStripeWebhook_WhenCheckoutSessionCompleted_ShouldAc // Assert response.EnsureSuccessStatusCode(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Active)); - var suspensionReason = Connection.ExecuteScalar("SELECT SuspensionReason FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value 
}]); + var suspensionReason = Connection.ExecuteScalar("SELECT suspension_reason FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); suspensionReason.Should().BeNullOrEmpty(); } @@ -279,10 +280,10 @@ public async Task AcknowledgeStripeWebhook_WhenCustomerDeleted_ShouldSuspendTena // Assert response.EnsureSuccessStatusCode(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Suspended)); - var suspensionReason = Connection.ExecuteScalar("SELECT SuspensionReason FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var suspensionReason = Connection.ExecuteScalar("SELECT suspension_reason FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); suspensionReason.Should().Be(nameof(SuspensionReason.CustomerDeleted)); } @@ -292,7 +293,7 @@ public async Task AcknowledgeStripeWebhook_WhenSubscriptionDeletedAndTenantAlrea // Arrange - tenant already suspended with CustomerDeleted (e.g., customer.deleted processed in previous batch) MockStripeClient.SimulateSubscriptionDeleted = true; SetupSubscription(cancellationReason: nameof(CancellationReason.NoLongerNeeded)); - Connection.Update("Tenants", "Id", DatabaseSeeder.Tenant1.Id.Value, [("State", nameof(TenantState.Suspended)), ("SuspensionReason", nameof(SuspensionReason.CustomerDeleted))]); + Connection.Update("tenants", "id", DatabaseSeeder.Tenant1.Id.Value, [("state", nameof(TenantState.Suspended)), ("suspension_reason", nameof(SuspensionReason.CustomerDeleted))]); TelemetryEventsCollectorSpy.Reset(); // Act @@ -306,10 +307,10 @@ public async Task AcknowledgeStripeWebhook_WhenSubscriptionDeletedAndTenantAlrea // Assert - tenant should remain Suspended with CustomerDeleted, not 
overridden to Active or PaymentFailed response.EnsureSuccessStatusCode(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Suspended)); - var suspensionReason = Connection.ExecuteScalar("SELECT SuspensionReason FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var suspensionReason = Connection.ExecuteScalar("SELECT suspension_reason FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); suspensionReason.Should().Be(nameof(SuspensionReason.CustomerDeleted)); } @@ -319,18 +320,18 @@ public async Task AcknowledgeStripeWebhook_WhenCustomerDeletedAndSubscriptionDel // Arrange - pre-insert a pending customer.deleted event so both events process in the same batch MockStripeClient.SimulateCustomerDeleted = true; SetupSubscription(cancellationReason: nameof(CancellationReason.NoLongerNeeded)); - Connection.Insert("StripeEvents", [ - ("TenantId", null), - ("Id", $"{MockStripeClient.MockWebhookEventId}_customer_deleted"), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("EventType", "customer.deleted"), - ("Status", nameof(StripeEventStatus.Pending)), - ("ProcessedAt", null), - ("StripeCustomerId", MockStripeClient.MockCustomerId), - ("StripeSubscriptionId", null), - ("Payload", null), - ("Error", null) + Connection.Insert("stripe_events", [ + ("tenant_id", null), + ("id", $"{MockStripeClient.MockWebhookEventId}_customer_deleted"), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("event_type", "customer.deleted"), + ("status", nameof(StripeEventStatus.Pending)), + ("processed_at", null), + ("stripe_customer_id", MockStripeClient.MockCustomerId), + ("stripe_subscription_id", null), + ("payload", null), + ("error", 
null) ] ); TelemetryEventsCollectorSpy.Reset(); @@ -346,10 +347,10 @@ public async Task AcknowledgeStripeWebhook_WhenCustomerDeletedAndSubscriptionDel // Assert - customer.deleted should take priority, tenant suspended with CustomerDeleted response.EnsureSuccessStatusCode(); - var tenantState = Connection.ExecuteScalar("SELECT State FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var tenantState = Connection.ExecuteScalar("SELECT state FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); tenantState.Should().Be(nameof(TenantState.Suspended)); - var suspensionReason = Connection.ExecuteScalar("SELECT SuspensionReason FROM Tenants WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); + var suspensionReason = Connection.ExecuteScalar("SELECT suspension_reason FROM tenants WHERE id = @id", [new { id = DatabaseSeeder.Tenant1.Id.Value }]); suspensionReason.Should().Be(nameof(SuspensionReason.CustomerDeleted)); } @@ -367,10 +368,10 @@ public async Task AcknowledgeStripeWebhook_WhenNoSubscriptionFound_ShouldStoreEv // Assert response.EnsureSuccessStatusCode(); - var eventCount = Connection.ExecuteScalar("SELECT COUNT(*) FROM StripeEvents WHERE StripeCustomerId = @customerId", [new { customerId = MockStripeClient.MockCustomerId }]); + var eventCount = Connection.ExecuteScalar("SELECT COUNT(*) FROM stripe_events WHERE stripe_customer_id = @customerId", [new { customerId = MockStripeClient.MockCustomerId }]); eventCount.Should().Be(1); - var eventStatus = Connection.ExecuteScalar("SELECT Status FROM StripeEvents WHERE StripeCustomerId = @customerId", [new { customerId = MockStripeClient.MockCustomerId }]); + var eventStatus = Connection.ExecuteScalar("SELECT status FROM stripe_events WHERE stripe_customer_id = @customerId", [new { customerId = MockStripeClient.MockCustomerId }]); eventStatus.Should().Be(nameof(StripeEventStatus.Pending)); } diff --git 
a/application/account/Tests/Subscriptions/CancelSubscriptionTests.cs b/application/account/Tests/Subscriptions/CancelSubscriptionTests.cs index 11c827300e..b9aa91c139 100644 --- a/application/account/Tests/Subscriptions/CancelSubscriptionTests.cs +++ b/application/account/Tests/Subscriptions/CancelSubscriptionTests.cs @@ -16,11 +16,11 @@ public sealed class CancelSubscriptionTests : EndpointBaseTest public async Task CancelSubscription_WhenActiveSubscription_ShouldSucceed() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new CancelSubscriptionCommand(CancellationReason.TooExpensive, "The price doubled last month."); @@ -55,12 +55,12 @@ public async Task CancelSubscription_WhenBasisSubscription_ShouldReturnBadReques public async Task CancelSubscription_WhenAlreadyCancelled_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)), - ("CancelAtPeriodEnd", true) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)), + ("cancel_at_period_end", true) ] ); var 
command = new CancelSubscriptionCommand(CancellationReason.FoundAlternative, "Switched to competitor."); @@ -79,11 +79,11 @@ public async Task CancelSubscription_WhenAlreadyCancelled_ShouldReturnBadRequest public async Task CancelSubscription_WhenNonOwner_ShouldReturnForbidden() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new CancelSubscriptionCommand(CancellationReason.Other, "Just testing."); diff --git a/application/account/Tests/Subscriptions/GetCheckoutPreviewTests.cs b/application/account/Tests/Subscriptions/GetCheckoutPreviewTests.cs index fd88f32c0e..b7386090f2 100644 --- a/application/account/Tests/Subscriptions/GetCheckoutPreviewTests.cs +++ b/application/account/Tests/Subscriptions/GetCheckoutPreviewTests.cs @@ -16,8 +16,8 @@ public sealed class GetCheckoutPreviewTests : EndpointBaseTest public async Task GetCheckoutPreview_WhenStandardPlan_ShouldReturnPreview() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("StripeCustomerId", "cus_test_123") + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("stripe_customer_id", "cus_test_123") ] ); @@ -47,8 +47,8 @@ public async Task GetCheckoutPreview_WhenBasisPlan_ShouldReturnBadRequest() public async Task GetCheckoutPreview_WhenNonOwner_ShouldReturnForbidden() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("StripeCustomerId", 
"cus_test_123") + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("stripe_customer_id", "cus_test_123") ] ); diff --git a/application/account/Tests/Subscriptions/GetCurrentSubscriptionTests.cs b/application/account/Tests/Subscriptions/GetCurrentSubscriptionTests.cs index 7765e09ce9..d9ad2d5aac 100644 --- a/application/account/Tests/Subscriptions/GetCurrentSubscriptionTests.cs +++ b/application/account/Tests/Subscriptions/GetCurrentSubscriptionTests.cs @@ -15,13 +15,13 @@ public sealed class GetCurrentSubscriptionTests : EndpointBaseTest public async Task GetUpgradePreview_WhenStandardToPremium_ShouldReturnPreview() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); @@ -49,11 +49,11 @@ public async Task GetUpgradePreview_WhenStandardToPremium_ShouldReturnPreview() public async Task GetUpgradePreview_WhenPlanNotHigher_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Premium)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Premium)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); @@ 
-68,11 +68,11 @@ public async Task GetUpgradePreview_WhenPlanNotHigher_ShouldReturnBadRequest() public async Task GetUpgradePreview_WhenNonOwner_ShouldReturnForbidden() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); @@ -87,9 +87,9 @@ public async Task GetUpgradePreview_WhenNonOwner_ShouldReturnForbidden() public async Task GetUpgradePreview_WhenNoStripeSubscription_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123") + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123") ] ); diff --git a/application/account/Tests/Subscriptions/ReactivateSubscriptionTests.cs b/application/account/Tests/Subscriptions/ReactivateSubscriptionTests.cs index b8bf25621c..6eb3e7f69e 100644 --- a/application/account/Tests/Subscriptions/ReactivateSubscriptionTests.cs +++ b/application/account/Tests/Subscriptions/ReactivateSubscriptionTests.cs @@ -16,12 +16,12 @@ public sealed class ReactivateSubscriptionTests : EndpointBaseTest public async Task ScheduleDowngrade_WhenPremiumToStandard_ShouldSucceed() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Premium)), - ("StripeCustomerId", "cus_test_123"), - 
("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Premium)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new ScheduleDowngradeCommand(SubscriptionPlan.Standard); @@ -39,11 +39,11 @@ public async Task ScheduleDowngrade_WhenPremiumToStandard_ShouldSucceed() public async Task ScheduleDowngrade_WhenPlanNotLower_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new ScheduleDowngradeCommand(SubscriptionPlan.Premium); @@ -62,11 +62,11 @@ public async Task ScheduleDowngrade_WhenPlanNotLower_ShouldReturnBadRequest() public async Task ScheduleDowngrade_WhenTargetIsBasis_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", 
TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new ScheduleDowngradeCommand(SubscriptionPlan.Basis); @@ -85,11 +85,11 @@ public async Task ScheduleDowngrade_WhenTargetIsBasis_ShouldReturnBadRequest() public async Task ScheduleDowngrade_WhenNonOwner_ShouldReturnForbidden() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Premium)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Premium)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); var command = new ScheduleDowngradeCommand(SubscriptionPlan.Standard); diff --git a/application/account/Tests/Subscriptions/StartSubscriptionCheckoutTests.cs b/application/account/Tests/Subscriptions/StartSubscriptionCheckoutTests.cs index 94fcabb524..4a1adde8cf 100644 --- a/application/account/Tests/Subscriptions/StartSubscriptionCheckoutTests.cs +++ b/application/account/Tests/Subscriptions/StartSubscriptionCheckoutTests.cs @@ -17,9 +17,9 @@ public sealed class StartSubscriptionCheckoutTests : EndpointBaseTest("SELECT DeletedAt FROM Tenants WHERE Id = @id", [new { id = existingTenantId.ToString() }]); + Connection.RowExists("tenants", existingTenantId).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM tenants WHERE id = @id", [new { id = existingTenantId.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); - var ownerDeletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }]); + var ownerDeletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = 
DatabaseSeeder.Tenant1Owner.Id.ToString() }]); ownerDeletedAt.Should().BeNull(); - var memberDeletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }]); + var memberDeletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }]); memberDeletedAt.Should().BeNull(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(1); @@ -55,11 +55,11 @@ public async Task DeleteTenant_WhenValid_ShouldSoftDeleteTenant() public async Task DeleteTenant_WhenActiveSubscription_ShouldReturnBadRequest() { // Arrange - Connection.Update("Subscriptions", "TenantId", DatabaseSeeder.Tenant1.Id.Value, [ - ("Plan", nameof(SubscriptionPlan.Standard)), - ("StripeCustomerId", "cus_test_123"), - ("StripeSubscriptionId", "sub_test_123"), - ("CurrentPeriodEnd", TimeProvider.GetUtcNow().AddDays(30)) + Connection.Update("subscriptions", "tenant_id", DatabaseSeeder.Tenant1.Id.Value, [ + ("plan", nameof(SubscriptionPlan.Standard)), + ("stripe_customer_id", "cus_test_123"), + ("stripe_subscription_id", "sub_test_123"), + ("current_period_end", TimeProvider.GetUtcNow().AddDays(30)) ] ); diff --git a/application/account/Tests/Tenants/GetTenantsForUserTests.cs b/application/account/Tests/Tenants/GetTenantsForUserTests.cs index 9000bb956c..87fde3a27c 100644 --- a/application/account/Tests/Tenants/GetTenantsForUserTests.cs +++ b/application/account/Tests/Tenants/GetTenantsForUserTests.cs @@ -2,6 +2,7 @@ using System.Net.Http.Json; using System.Text.Json; using Account.Database; +using Account.Features.Subscriptions.Domain; using Account.Features.Tenants.Domain; using Account.Features.Tenants.Queries; using Account.Features.Users.Domain; @@ -23,30 +24,31 @@ public async Task GetTenants_UserWithMultipleTenants_ReturnsAllTenants() var tenant2Name = Faker.Company.CompanyName(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - 
("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", tenant2Name), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", tenant2Name), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Owner)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Owner)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -97,30 +99,31 @@ public async Task GetTenants_CurrentTenantIncluded_VerifyCurrentTenantInResponse var otherTenantId = TenantId.NewId(); var otherUserId = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", otherTenantId.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", "Other Tenant"), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", otherTenantId.Value), + ("created_at", TimeProvider.GetUtcNow()), + 
("modified_at", null), + ("name", "Other Tenant"), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", otherTenantId.Value), - ("Id", otherUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", email), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", otherTenantId.Value), + ("id", otherUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", email), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -142,30 +145,31 @@ public async Task GetTenants_UsersOnlySeeTheirOwnTenants_DoesNotReturnOtherUsers var otherUserTenantId = TenantId.NewId(); var otherUserId = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", otherUserTenantId.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", "Other User Tenant"), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", otherUserTenantId.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", "Other User Tenant"), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", otherUserTenantId.Value), - ("Id", otherUserId.ToString()), - ("CreatedAt", 
TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", otherUserEmail), - ("EmailConfirmed", true), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", otherUserTenantId.Value), + ("id", otherUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", otherUserEmail), + ("email_confirmed", true), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -188,30 +192,31 @@ public async Task GetTenants_UserWithUnconfirmedEmail_ShowsAsNewTenant() var tenant2Name = Faker.Company.CompanyName(); var user2Id = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.Value), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", tenant2Name), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.Value), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", tenant2Name), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.Value), - ("Id", user2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", false), // User has not confirmed email in this tenant - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", 
nameof(UserRole.Member)), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.Value), + ("id", user2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", false), // User has not confirmed email in this tenant + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/Tests/Tenants/UpdateCurrentTenantTests.cs b/application/account/Tests/Tenants/UpdateCurrentTenantTests.cs index fb7793ab9e..9e111bd23d 100644 --- a/application/account/Tests/Tenants/UpdateCurrentTenantTests.cs +++ b/application/account/Tests/Tenants/UpdateCurrentTenantTests.cs @@ -41,7 +41,7 @@ public async Task UpdateCurrentTenant_WhenInvalid_ShouldReturnBadRequest() // Assert var expectedErrors = new[] { - new ErrorDetail("Name", "Name must be between 1 and 30 characters.") + new ErrorDetail("name", "Name must be between 1 and 30 characters.") }; await response.ShouldHaveErrorStatusCode(HttpStatusCode.BadRequest, expectedErrors); diff --git a/application/account/Tests/Users/BulkDeleteUsersTests.cs b/application/account/Tests/Users/BulkDeleteUsersTests.cs index e300d51667..1d7c74c1af 100644 --- a/application/account/Tests/Users/BulkDeleteUsersTests.cs +++ b/application/account/Tests/Users/BulkDeleteUsersTests.cs @@ -24,21 +24,21 @@ public async Task BulkDeleteUsers_WhenUsersExist_ShouldSoftDeleteUsers() { var userId = UserId.NewId(); userIds.Add(userId); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - 
("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Test User"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", userId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Test User"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); } @@ -52,8 +52,8 @@ public async Task BulkDeleteUsers_WhenUsersExist_ShouldSoftDeleteUsers() response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); foreach (var userId in userIds) { - Connection.RowExists("Users", userId.ToString()).Should().BeTrue(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = userId.ToString() }]); + Connection.RowExists("users", userId.ToString()).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = userId.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); } @@ -135,39 +135,39 @@ public async Task BulkDeleteUsers_WhenMixedConfirmedAndUnconfirmed_ShouldSoftDel var confirmedUserId = UserId.NewId(); var unconfirmedUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", confirmedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Confirmed 
User"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", confirmedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Confirmed User"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", unconfirmedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", null), - ("LastName", null), - ("Title", null), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", false), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", unconfirmedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", null), + ("last_name", null), + ("title", null), + ("role", nameof(UserRole.Member)), + ("email_confirmed", false), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -179,12 +179,12 @@ public async Task BulkDeleteUsers_WhenMixedConfirmedAndUnconfirmed_ShouldSoftDel // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", 
confirmedUserId.ToString()).Should().BeTrue(); - var confirmedDeletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = confirmedUserId.ToString() }]); + Connection.RowExists("users", confirmedUserId.ToString()).Should().BeTrue(); + var confirmedDeletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = confirmedUserId.ToString() }]); confirmedDeletedAt.Should().NotBeNullOrEmpty(); - Connection.RowExists("Users", unconfirmedUserId.ToString()).Should().BeTrue(); - var unconfirmedDeletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = unconfirmedUserId.ToString() }]); + Connection.RowExists("users", unconfirmedUserId.ToString()).Should().BeTrue(); + var unconfirmedDeletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = unconfirmedUserId.ToString() }]); unconfirmedDeletedAt.Should().NotBeNullOrEmpty(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(3); diff --git a/application/account/Tests/Users/BulkPurgeUsersTests.cs b/application/account/Tests/Users/BulkPurgeUsersTests.cs index 847e68a24b..dc8176296b 100644 --- a/application/account/Tests/Users/BulkPurgeUsersTests.cs +++ b/application/account/Tests/Users/BulkPurgeUsersTests.cs @@ -21,55 +21,55 @@ public async Task BulkPurgeUsers_WhenOwnerDeletesMultipleDeletedUsers_ShouldPerm var deletedUserId1 = UserId.NewId(); var deletedUserId2 = UserId.NewId(); var deletedUserId3 = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId1.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-2)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-2)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee 1"), - ("Role", 
nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId1.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-2)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-2)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee 1"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId2.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-5)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee 2"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId2.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-5)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee 2"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", 
"en-US"), + ("external_identities", "[]") ] ); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId3.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-3)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee 3"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId3.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-3)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee 3"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -79,9 +79,9 @@ public async Task BulkPurgeUsers_WhenOwnerDeletesMultipleDeletedUsers_ShouldPerm // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", deletedUserId1.ToString()).Should().BeFalse(); - Connection.RowExists("Users", deletedUserId2.ToString()).Should().BeFalse(); - Connection.RowExists("Users", deletedUserId3.ToString()).Should().BeTrue(); + Connection.RowExists("users", deletedUserId1.ToString()).Should().BeFalse(); + Connection.RowExists("users", deletedUserId2.ToString()).Should().BeFalse(); + Connection.RowExists("users", deletedUserId3.ToString()).Should().BeTrue(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); 
TelemetryEventsCollectorSpy.CollectedEvents.Should().AllSatisfy(e => @@ -97,21 +97,21 @@ public async Task BulkPurgeUsers_WhenMember_ShouldReturnForbidden() { // Arrange var deletedUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-2)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-2)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-2)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-2)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -121,7 +121,7 @@ public async Task BulkPurgeUsers_WhenMember_ShouldReturnForbidden() // Assert await response.ShouldHaveErrorStatusCode(HttpStatusCode.Forbidden, "Only owners can permanently delete users from the recycle bin."); - Connection.RowExists("Users", deletedUserId.ToString()).Should().BeTrue(); + Connection.RowExists("users", deletedUserId.ToString()).Should().BeTrue(); } [Fact] diff --git a/application/account/Tests/Users/ChangeLocaleTests.cs b/application/account/Tests/Users/ChangeLocaleTests.cs index ea12c09190..b31464b9a3 
100644 --- a/application/account/Tests/Users/ChangeLocaleTests.cs +++ b/application/account/Tests/Users/ChangeLocaleTests.cs @@ -26,7 +26,7 @@ public async Task ChangeLocale_WhenValidLocale_ShouldUpdateUserLocaleAndCollectE response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); var updatedLocale = Connection.ExecuteScalar( - "SELECT Locale FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] + "SELECT locale FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] ); updatedLocale.Should().Be(newLocale); @@ -51,7 +51,7 @@ public async Task ChangeLocale_WhenMemberChangesLocale_ShouldSucceed() response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); var updatedLocale = Connection.ExecuteScalar( - "SELECT Locale FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }] + "SELECT locale FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }] ); updatedLocale.Should().Be(newLocale); @@ -102,7 +102,7 @@ public async Task ChangeLocale_WhenChangingToSameLocale_ShouldSucceed() { // Arrange var locale = "en-US"; - Connection.Update("Users", "Id", DatabaseSeeder.Tenant1Owner.Id.ToString(), [("Locale", locale)]); + Connection.Update("users", "id", DatabaseSeeder.Tenant1Owner.Id.ToString(), [("locale", locale)]); var command = new ChangeLocaleCommand(locale); // Act @@ -112,7 +112,7 @@ public async Task ChangeLocale_WhenChangingToSameLocale_ShouldSucceed() response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); var updatedLocale = Connection.ExecuteScalar( - "SELECT Locale FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] + "SELECT locale FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] ); updatedLocale.Should().Be(locale); diff --git a/application/account/Tests/Users/ChangeUserRoleTests.cs b/application/account/Tests/Users/ChangeUserRoleTests.cs index 3f0ae7c65c..f5ab90dc9a 100644 --- 
a/application/account/Tests/Users/ChangeUserRoleTests.cs +++ b/application/account/Tests/Users/ChangeUserRoleTests.cs @@ -28,7 +28,7 @@ public async Task ChangeUserRole_WhenOwnerChangesAnotherUserRole_ShouldSucceed() response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); var updatedRole = Connection.ExecuteScalar( - "SELECT Role FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }] + "SELECT role FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }] ); updatedRole.Should().Be(nameof(UserRole.Owner)); @@ -44,7 +44,7 @@ public async Task ChangeUserRole_WhenOwnerChangesAnotherUserRole_ShouldSucceed() public async Task ChangeUserRole_WhenOwnerChangesRoleFromOwnerToMember_ShouldSucceed() { // Arrange - Connection.Update("Users", "Id", DatabaseSeeder.Tenant1Member.Id.ToString(), [("Role", nameof(UserRole.Owner))]); + Connection.Update("users", "id", DatabaseSeeder.Tenant1Member.Id.ToString(), [("role", nameof(UserRole.Owner))]); var command = new ChangeUserRoleCommand { UserRole = UserRole.Member }; // Act @@ -56,7 +56,7 @@ public async Task ChangeUserRole_WhenOwnerChangesRoleFromOwnerToMember_ShouldSuc response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); var updatedRole = Connection.ExecuteScalar( - "SELECT Role FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }] + "SELECT role FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Member.Id.ToString() }] ); updatedRole.Should().Be(nameof(UserRole.Member)); @@ -81,7 +81,7 @@ public async Task ChangeUserRole_WhenOwnerTriesToChangeTheirOwnRole_ShouldReturn await response.ShouldHaveErrorStatusCode(HttpStatusCode.Forbidden, "You cannot change your own user role."); var roleUnchanged = Connection.ExecuteScalar( - "SELECT Role FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] + "SELECT role FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] ); 
roleUnchanged.Should().Be(nameof(UserRole.Owner)); @@ -106,7 +106,7 @@ await response.ShouldHaveErrorStatusCode( ); var roleUnchanged = Connection.ExecuteScalar( - "SELECT Role FROM Users WHERE Id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] + "SELECT role FROM users WHERE id = @id", [new { id = DatabaseSeeder.Tenant1Owner.Id.ToString() }] ); roleUnchanged.Should().Be(nameof(UserRole.Owner)); diff --git a/application/account/Tests/Users/DeclineInvitationTests.cs b/application/account/Tests/Users/DeclineInvitationTests.cs index 654e31f556..f23ca9946e 100644 --- a/application/account/Tests/Users/DeclineInvitationTests.cs +++ b/application/account/Tests/Users/DeclineInvitationTests.cs @@ -2,6 +2,7 @@ using System.Net.Http.Json; using System.Text.Json; using Account.Database; +using Account.Features.Subscriptions.Domain; using Account.Features.Tenants.Domain; using Account.Features.Users.Commands; using Account.Features.Users.Domain; @@ -22,31 +23,32 @@ public async Task DeclineInvitation_WhenValidInviteExists_ShouldDeleteUserAndCol var newTenantId = TenantId.NewId(); var userId = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", newTenantId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", newTenantId.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", newTenantId.ToString()), - ("Id", userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", false), - ("FirstName", 
null), - ("LastName", null), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", ""), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", newTenantId.ToString()), + ("id", userId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", false), + ("first_name", null), + ("last_name", null), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", ""), + ("external_identities", "[]") ] ); @@ -58,8 +60,8 @@ public async Task DeclineInvitation_WhenValidInviteExists_ShouldDeleteUserAndCol // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", userId.ToString()).Should().BeTrue(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = userId.ToString() }]); + Connection.RowExists("users", userId.ToString()).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = userId.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); @@ -94,59 +96,61 @@ public async Task DeclineInvitation_WhenMultipleInvitesExist_ShouldDeclineSpecif var userId2 = UserId.NewId(); var userId3 = UserId.NewId(); - Connection.Insert("Tenants", [ - ("Id", tenant2Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant2Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", 
"""{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Tenants", [ - ("Id", tenant3Id.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow()), - ("ModifiedAt", null), - ("Name", Faker.Company.CompanyName()), - ("State", nameof(TenantState.Active)), - ("Logo", """{"Url":null,"Version":0}""") + Connection.Insert("tenants", [ + ("id", tenant3Id.ToString()), + ("created_at", TimeProvider.GetUtcNow()), + ("modified_at", null), + ("name", Faker.Company.CompanyName()), + ("state", nameof(TenantState.Active)), + ("logo", """{"Url":null,"Version":0}"""), + ("plan", nameof(SubscriptionPlan.Basis)) ] ); - Connection.Insert("Users", [ - ("TenantId", tenant2Id.ToString()), - ("Id", userId2.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", false), - ("FirstName", null), - ("LastName", null), - ("Title", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", ""), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant2Id.ToString()), + ("id", userId2.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", false), + ("first_name", null), + ("last_name", null), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", ""), + ("external_identities", "[]") ] ); - Connection.Insert("Users", [ - ("TenantId", tenant3Id.ToString()), - ("Id", userId3.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-5)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", DatabaseSeeder.Tenant1Member.Email), - ("EmailConfirmed", false), - ("FirstName", null), - ("LastName", null), - ("Title", null), - ("Avatar", 
JsonSerializer.Serialize(new Avatar())), - ("Role", nameof(UserRole.Member)), - ("Locale", ""), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", tenant3Id.ToString()), + ("id", userId3.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-5)), + ("modified_at", null), + ("deleted_at", null), + ("email", DatabaseSeeder.Tenant1Member.Email), + ("email_confirmed", false), + ("first_name", null), + ("last_name", null), + ("title", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("role", nameof(UserRole.Member)), + ("locale", ""), + ("external_identities", "[]") ] ); @@ -158,10 +162,10 @@ public async Task DeclineInvitation_WhenMultipleInvitesExist_ShouldDeclineSpecif // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", userId2.ToString()).Should().BeTrue(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = userId2.ToString() }]); + Connection.RowExists("users", userId2.ToString()).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = userId2.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); - Connection.RowExists("Users", userId3.ToString()).Should().BeTrue(); + Connection.RowExists("users", userId3.ToString()).Should().BeTrue(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("UserInviteDeclined"); diff --git a/application/account/Tests/Users/DeleteUserTests.cs b/application/account/Tests/Users/DeleteUserTests.cs index e969ed9a55..1aa7c57c4b 100644 --- a/application/account/Tests/Users/DeleteUserTests.cs +++ b/application/account/Tests/Users/DeleteUserTests.cs @@ -31,21 +31,21 @@ public async Task DeleteUser_WhenUserExists_ShouldSoftDeleteUser() { // Arrange var userId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", 
DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Philanthropist & Innovator"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", userId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Philanthropist & Innovator"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -54,8 +54,8 @@ public async Task DeleteUser_WhenUserExists_ShouldSoftDeleteUser() // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", userId.ToString()).Should().BeTrue(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = userId.ToString() }]); + Connection.RowExists("users", userId.ToString()).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = userId.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); } @@ -77,36 +77,36 @@ public async Task DeleteUser_WhenUserHasEmailLoginHistory_ShouldSoftDeleteUserAn { // Arrange var userId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", 
null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Philanthropist & Innovator"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", userId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Philanthropist & Innovator"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); - var email = Connection.ExecuteScalar("SELECT Email FROM Users WHERE Id = @id", [new { id = userId.ToString() }]); + var email = Connection.ExecuteScalar("SELECT email FROM users WHERE id = @id", [new { id = userId.ToString() }]); var emailLoginId = EmailLoginId.NewId(); - Connection.Insert("EmailLogins", [ - ("Id", emailLoginId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-5)), - ("ModifiedAt", null), - ("Email", email), - ("Type", nameof(EmailLoginType.Login)), - ("OneTimePasswordHash", "hash"), - ("RetryCount", 0), - ("ResendCount", 0), - ("Completed", true) + Connection.Insert("email_logins", [ + ("id", emailLoginId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-5)), + ("modified_at", null), + ("email", email), + ("type", nameof(EmailLoginType.Login)), + ("one_time_password_hash", "hash"), + ("retry_count", 0), + ("resend_count", 0), + ("completed", true) ] ); @@ -115,10 +115,10 @@ public async Task DeleteUser_WhenUserHasEmailLoginHistory_ShouldSoftDeleteUserAn // Assert 
response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", userId.ToString()).Should().BeTrue(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = userId.ToString() }]); + Connection.RowExists("users", userId.ToString()).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = userId.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); - Connection.RowExists("EmailLogins", emailLoginId.ToString()).Should().BeTrue(); + Connection.RowExists("email_logins", emailLoginId.ToString()).Should().BeTrue(); } [Fact] @@ -126,21 +126,21 @@ public async Task DeleteUser_WhenUserNeverConfirmedEmail_ShouldSoftDeleteUser() { // Arrange var userId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", null), - ("LastName", null), - ("Title", null), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", false), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", userId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", null), + ("last_name", null), + ("title", null), + ("role", nameof(UserRole.Member)), + ("email_confirmed", false), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -149,8 +149,8 @@ public async Task DeleteUser_WhenUserNeverConfirmedEmail_ShouldSoftDeleteUser() // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - 
Connection.RowExists("Users", userId.ToString()).Should().BeTrue(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = userId.ToString() }]); + Connection.RowExists("users", userId.ToString()).Should().BeTrue(); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = userId.ToString() }]); deletedAt.Should().NotBeNullOrEmpty(); } } diff --git a/application/account/Tests/Users/EmptyRecycleBinTests.cs b/application/account/Tests/Users/EmptyRecycleBinTests.cs index 62999bf56d..53a9eb8aab 100644 --- a/application/account/Tests/Users/EmptyRecycleBinTests.cs +++ b/application/account/Tests/Users/EmptyRecycleBinTests.cs @@ -19,38 +19,38 @@ public async Task EmptyRecycleBin_WhenOwnerEmptiesRecycleBin_ShouldPermanentlyDe // Arrange var deletedUserId1 = UserId.NewId(); var deletedUserId2 = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId1.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-2)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-2)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee 1"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId1.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-2)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-2)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee 1"), + ("role", 
nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId2.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-5)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee 2"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId2.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-5)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee 2"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -61,8 +61,8 @@ public async Task EmptyRecycleBin_WhenOwnerEmptiesRecycleBin_ShouldPermanentlyDe response.EnsureSuccessStatusCode(); var deletedCount = await response.Content.ReadFromJsonAsync(); deletedCount.Should().Be(2); - Connection.RowExists("Users", deletedUserId1.ToString()).Should().BeFalse(); - Connection.RowExists("Users", deletedUserId2.ToString()).Should().BeFalse(); + Connection.RowExists("users", deletedUserId1.ToString()).Should().BeFalse(); + Connection.RowExists("users", deletedUserId2.ToString()).Should().BeFalse(); 
TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(2); TelemetryEventsCollectorSpy.CollectedEvents.Should().AllSatisfy(e => diff --git a/application/account/Tests/Users/GetDeletedUsersTests.cs b/application/account/Tests/Users/GetDeletedUsersTests.cs index e4dd98110d..7599281c5c 100644 --- a/application/account/Tests/Users/GetDeletedUsersTests.cs +++ b/application/account/Tests/Users/GetDeletedUsersTests.cs @@ -19,21 +19,21 @@ public async Task GetDeletedUsers_WhenOwner_ShouldReturnDeletedUsers() { // Arrange var deletedUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/Tests/Users/GetUserByIdTests.cs b/application/account/Tests/Users/GetUserByIdTests.cs index 1e38dc7111..ce461487ab 100644 --- 
a/application/account/Tests/Users/GetUserByIdTests.cs +++ b/application/account/Tests/Users/GetUserByIdTests.cs @@ -17,20 +17,20 @@ public sealed class GetUserByIdTests : EndpointBaseTest public GetUserByIdTests() { - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", _userId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Name.FirstName()), - ("LastName", Faker.Name.LastName()), - ("Title", Faker.Name.JobTitle()), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", _userId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Name.FirstName()), + ("last_name", Faker.Name.LastName()), + ("title", Faker.Name.JobTitle()), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); } diff --git a/application/account/Tests/Users/GetUserSummaryTests.cs b/application/account/Tests/Users/GetUserSummaryTests.cs index 39c6388a2d..8eed028b9e 100644 --- a/application/account/Tests/Users/GetUserSummaryTests.cs +++ b/application/account/Tests/Users/GetUserSummaryTests.cs @@ -20,62 +20,62 @@ public async Task GetUserSummary_WhenUsersHaveVariousLastSeenDates_ShouldCountAc var thirtyOneDaysAgo = now.AddDays(-31); // Set the seeded owner user as active (LastSeenAt within 30 days) - Connection.Update("Users", "Id", DatabaseSeeder.Tenant1Owner.Id.ToString(), [("LastSeenAt", now)]); + Connection.Update("users", "id", DatabaseSeeder.Tenant1Owner.Id.ToString(), [("last_seen_at", now)]); // Insert 
an active user (LastSeenAt within 30 days, confirmed) - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", now.AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", "active@example.com"), - ("FirstName", "Active"), - ("LastName", "User"), - ("Title", null), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("LastSeenAt", now.AddDays(-5)), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", now.AddMinutes(-10)), + ("modified_at", null), + ("email", "active@example.com"), + ("first_name", "Active"), + ("last_name", "User"), + ("title", null), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("last_seen_at", now.AddDays(-5)), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); // Insert an inactive user (LastSeenAt older than 30 days, confirmed) - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", now.AddDays(-60)), - ("ModifiedAt", null), - ("Email", "inactive@example.com"), - ("FirstName", "Inactive"), - ("LastName", "User"), - ("Title", null), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("LastSeenAt", thirtyOneDaysAgo), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", now.AddDays(-60)), + ("modified_at", null), + ("email", "inactive@example.com"), + ("first_name", "Inactive"), + ("last_name", "User"), + ("title", null), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + 
("last_seen_at", thirtyOneDaysAgo), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); // Insert a pending user (not confirmed, no LastSeenAt) - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", now.AddMinutes(-5)), - ("ModifiedAt", null), - ("Email", "pending@example.com"), - ("FirstName", null), - ("LastName", null), - ("Title", null), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", false), - ("LastSeenAt", null), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", now.AddMinutes(-5)), + ("modified_at", null), + ("email", "pending@example.com"), + ("first_name", null), + ("last_name", null), + ("title", null), + ("role", nameof(UserRole.Member)), + ("email_confirmed", false), + ("last_seen_at", null), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/Tests/Users/GetUsersTests.cs b/application/account/Tests/Users/GetUsersTests.cs index 342cf07c44..06e4f4a001 100644 --- a/application/account/Tests/Users/GetUsersTests.cs +++ b/application/account/Tests/Users/GetUsersTests.cs @@ -19,36 +19,36 @@ public sealed class GetUsersTests : EndpointBaseTest public GetUsersTests() { - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", Email), - ("FirstName", FirstName), - ("LastName", LastName), - ("Title", "Philanthropist & Innovator"), - ("Role", UserRole.ToString()), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), 
- ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("email", Email), + ("first_name", FirstName), + ("last_name", LastName), + ("title", "Philanthropist & Innovator"), + ("role", UserRole.ToString()), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", UserId.NewId().ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddMinutes(-10)), - ("ModifiedAt", null), - ("Email", "ada@lovelace.com"), - ("FirstName", "Ada"), - ("LastName", "Lovelace"), - ("Title", "Mathematician & Writer"), - ("Role", UserRole.ToString()), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", UserId.NewId().ToString()), + ("created_at", TimeProvider.GetUtcNow().AddMinutes(-10)), + ("modified_at", null), + ("email", "ada@lovelace.com"), + ("first_name", "Ada"), + ("last_name", "Lovelace"), + ("title", "Mathematician & Writer"), + ("role", UserRole.ToString()), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); } diff --git a/application/account/Tests/Users/InviteUserTests.cs b/application/account/Tests/Users/InviteUserTests.cs index 2177beb081..40920bb4e2 100644 --- a/application/account/Tests/Users/InviteUserTests.cs +++ b/application/account/Tests/Users/InviteUserTests.cs @@ -37,7 +37,7 @@ public async Task InviteUser_WhenTenantHasName_ShouldCreateUserAndUseTenantNameI { // Arrange var tenantName = "Test Company"; - Connection.Update("Tenants", "Id", 
DatabaseSeeder.Tenant1.Id.ToString(), [("Name", tenantName)]); + Connection.Update("tenants", "id", DatabaseSeeder.Tenant1.Id.ToString(), [("name", tenantName)]); var email = Faker.Internet.UniqueEmail(); var command = new InviteUserCommand(email); @@ -50,7 +50,7 @@ public async Task InviteUser_WhenTenantHasName_ShouldCreateUserAndUseTenantNameI // Verify user was created Connection.ExecuteScalar( - "SELECT COUNT(*) FROM Users WHERE TenantId = @tenantId AND Email = @email AND EmailConfirmed = 0", + "SELECT COUNT(*) FROM users WHERE tenant_id = @tenantId AND email = @email AND email_confirmed = 0", [new { tenantId = DatabaseSeeder.Tenant1.Id.ToString(), email = email.ToLower() }] ).Should().Be(1); @@ -91,7 +91,7 @@ public async Task InviteUser_WhenInvalidEmail_ShouldReturnBadRequest() public async Task InviteUser_WhenUserExists_ShouldReturnBadRequest() { // Arrange - Connection.Update("Tenants", "Id", DatabaseSeeder.Tenant1.Id.ToString(), [("Name", "Test Company")]); + Connection.Update("tenants", "id", DatabaseSeeder.Tenant1.Id.ToString(), [("name", "Test Company")]); var existingUserEmail = DatabaseSeeder.Tenant1Owner.Email; var command = new InviteUserCommand(existingUserEmail); @@ -109,25 +109,25 @@ public async Task InviteUser_WhenUserExists_ShouldReturnBadRequest() public async Task InviteUser_WhenDeletedUserExists_ShouldReturnBadRequest() { // Arrange - Connection.Update("Tenants", "Id", DatabaseSeeder.Tenant1.Id.ToString(), [("Name", "Test Company")]); + Connection.Update("tenants", "id", DatabaseSeeder.Tenant1.Id.ToString(), [("name", "Test Company")]); var deletedUserEmail = Faker.Internet.UniqueEmail().ToLower(); var deletedUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", 
deletedUserEmail), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", deletedUserEmail), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/Tests/Users/PurgeUserTests.cs b/application/account/Tests/Users/PurgeUserTests.cs index ca04be947d..881a30052f 100644 --- a/application/account/Tests/Users/PurgeUserTests.cs +++ b/application/account/Tests/Users/PurgeUserTests.cs @@ -17,21 +17,21 @@ public async Task PurgeUser_WhenOwnerDeletesSoftDeletedUser_ShouldSucceed() { // Arrange var deletedUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", 
DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -40,7 +40,7 @@ public async Task PurgeUser_WhenOwnerDeletesSoftDeletedUser_ShouldSucceed() // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - Connection.RowExists("Users", deletedUserId.ToString()).Should().BeFalse(); + Connection.RowExists("users", deletedUserId.ToString()).Should().BeFalse(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(1); TelemetryEventsCollectorSpy.CollectedEvents[0].GetType().Name.Should().Be("UserPurged"); @@ -78,21 +78,21 @@ public async Task PurgeUser_WhenUserNotDeleted_ShouldReturnNotFound() { // Arrange var activeUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", activeUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Active Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", activeUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", 
Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Active Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/Tests/Users/RestoreUserTests.cs b/application/account/Tests/Users/RestoreUserTests.cs index fccd66fae7..85d833f592 100644 --- a/application/account/Tests/Users/RestoreUserTests.cs +++ b/application/account/Tests/Users/RestoreUserTests.cs @@ -17,21 +17,21 @@ public async Task RestoreUser_WhenOwnerRestoresDeletedUser_ShouldSucceed() { // Arrange var deletedUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", deletedUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("DeletedAt", TimeProvider.GetUtcNow().AddDays(-1)), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Former Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", deletedUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("deleted_at", TimeProvider.GetUtcNow().AddDays(-1)), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Former Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); @@ -40,7 +40,7 
@@ public async Task RestoreUser_WhenOwnerRestoresDeletedUser_ShouldSucceed() // Assert response.ShouldHaveEmptyHeaderAndLocationOnSuccess(); - var deletedAt = Connection.ExecuteScalar("SELECT DeletedAt FROM Users WHERE Id = @id", [new { id = deletedUserId.ToString() }]); + var deletedAt = Connection.ExecuteScalar("SELECT deleted_at FROM users WHERE id = @id", [new { id = deletedUserId.ToString() }]); deletedAt.Should().BeNullOrEmpty(); TelemetryEventsCollectorSpy.CollectedEvents.Count.Should().Be(1); @@ -78,21 +78,21 @@ public async Task RestoreUser_WhenUserNotDeleted_ShouldReturnNotFound() { // Arrange var activeUserId = UserId.NewId(); - Connection.Insert("Users", [ - ("TenantId", DatabaseSeeder.Tenant1.Id.ToString()), - ("Id", activeUserId.ToString()), - ("CreatedAt", TimeProvider.GetUtcNow().AddDays(-10)), - ("ModifiedAt", null), - ("DeletedAt", null), - ("Email", Faker.Internet.UniqueEmail()), - ("FirstName", Faker.Person.FirstName), - ("LastName", Faker.Person.LastName), - ("Title", "Active Employee"), - ("Role", nameof(UserRole.Member)), - ("EmailConfirmed", true), - ("Avatar", JsonSerializer.Serialize(new Avatar())), - ("Locale", "en-US"), - ("ExternalIdentities", "[]") + Connection.Insert("users", [ + ("tenant_id", DatabaseSeeder.Tenant1.Id.ToString()), + ("id", activeUserId.ToString()), + ("created_at", TimeProvider.GetUtcNow().AddDays(-10)), + ("modified_at", null), + ("deleted_at", null), + ("email", Faker.Internet.UniqueEmail()), + ("first_name", Faker.Person.FirstName), + ("last_name", Faker.Person.LastName), + ("title", "Active Employee"), + ("role", nameof(UserRole.Member)), + ("email_confirmed", true), + ("avatar", JsonSerializer.Serialize(new Avatar())), + ("locale", "en-US"), + ("external_identities", "[]") ] ); diff --git a/application/account/WebApp/routes/legal/cross-references.internal.md b/application/account/WebApp/routes/legal/cross-references.internal.md index 5350bc740e..89eb12e476 100644 --- 
a/application/account/WebApp/routes/legal/cross-references.internal.md +++ b/application/account/WebApp/routes/legal/cross-references.internal.md @@ -292,14 +292,12 @@ When used generically (not as defined term), use lowercase: "your account settin **We HAVE (describe in DPA Schedule 3):** -- Encryption in transit: HTTPS/TLS 1.2+ (Azure SQL Server enforced) +- Encryption in transit: HTTPS/TLS 1.2+ (PostgreSQL Flexible Server enforced, Ssl Mode=VerifyFull) - Encryption at rest: Azure platform encryption -- RBAC: Azure Active Directory authentication only (azureADOnlyAuthentication: true) -- SQL Server auditing: 90-day retention (authentication, batch operations) -- SQL Server vulnerability assessments: Recurring scans enabled -- SQL Server security alerts: Enabled -- Virtual network isolation: Subnet-based access control -- Restricted outbound network access: Enabled on SQL Server +- RBAC: Entra ID authentication +- PostgreSQL audit logging: log_statement=mod with 90-day retention to diagnostic storage account +- Virtual network isolation: Private Endpoint with Private DNS Zone +- Restricted outbound network access: No permanent firewall rules, temporary CI/CD access only - Application Insights: Monitoring and logging (may be sampled) - Telemetry events: Activity logging for mutations (not comprehensive audit) diff --git a/application/account/WebApp/routes/legal/dpa.en-US.md b/application/account/WebApp/routes/legal/dpa.en-US.md index 7de3bcfa5a..8ffb4e33a7 100644 --- a/application/account/WebApp/routes/legal/dpa.en-US.md +++ b/application/account/WebApp/routes/legal/dpa.en-US.md @@ -1,6 +1,6 @@ # Data Processing Agreement -**Effective date:** 1 Jan, 2026 +**Effective date:** 17 March, 2026 --- @@ -280,9 +280,9 @@ We will provide 14 days advance notice before engaging additional Sub-Processors **Database Security:** -- SQL Server auditing (90-day retention) -- SQL Server vulnerability assessments -- SQL Server security alerts +- Database audit logging (90-day 
retention) +- Encrypted connections with certificate verification +- Private network isolation **Monitoring and Logging:** diff --git a/application/back-office/Core/Database/Migrations/20250217000000_Initial.cs b/application/back-office/Core/Database/Migrations/20250217000000_Initial.cs index 13191a2c2a..8f84ba0667 100644 --- a/application/back-office/Core/Database/Migrations/20250217000000_Initial.cs +++ b/application/back-office/Core/Database/Migrations/20250217000000_Initial.cs @@ -9,5 +9,17 @@ public sealed class Initial : Migration { protected override void Up(MigrationBuilder migrationBuilder) { + migrationBuilder.CreateTable( + "__data_migrations_history", + table => new + { + migration_id = table.Column("text", nullable: false), + product_version = table.Column("text", nullable: false), + executed_at = table.Column("timestamptz", nullable: false), + execution_time_ms = table.Column("bigint", nullable: false), + summary = table.Column("text", nullable: false) + }, + constraints: table => { table.PrimaryKey("pk___data_migrations_history", x => x.migration_id); } + ); } } diff --git a/application/back-office/Tests/EndpointBaseTest.cs b/application/back-office/Tests/EndpointBaseTest.cs index 909ce5830b..efd791cc6a 100644 --- a/application/back-office/Tests/EndpointBaseTest.cs +++ b/application/back-office/Tests/EndpointBaseTest.cs @@ -52,18 +52,18 @@ protected EndpointBaseTest() Connection = new SqliteConnection($"Data Source=TestDb_{Guid.NewGuid():N};Mode=Memory;Cache=Shared"); Connection.Open(); - // Configure SQLite to behave more like SQL Server + // Configure SQLite to behave more like PostgreSQL using (var command = Connection.CreateCommand()) { - // Enable foreign key constraints (SQL Server has this by default) + // Enable foreign key constraints (PostgreSQL has this by default) command.CommandText = "PRAGMA foreign_keys = ON;"; command.ExecuteNonQuery(); - // Enable recursive triggers (SQL Server supports nested triggers) + // Enable recursive triggers 
(PostgreSQL supports nested triggers) command.CommandText = "PRAGMA recursive_triggers = ON;"; command.ExecuteNonQuery(); - // Enforce CHECK constraints (SQL Server enforces these by default) + // Enforce CHECK constraints (PostgreSQL enforces these by default) command.CommandText = "PRAGMA ignore_check_constraints = OFF;"; command.ExecuteNonQuery(); @@ -72,7 +72,7 @@ protected EndpointBaseTest() command.ExecuteNonQuery(); } - Services.AddDbContext(options => { options.UseSqlite(Connection); }); + Services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); Services.AddBackOfficeServices(); @@ -106,7 +106,7 @@ protected EndpointBaseTest() { // Replace the default DbContext in the WebApplication to use an in-memory SQLite database services.Remove(services.Single(d => d.ServiceType == typeof(IDbContextOptionsConfiguration))); - services.AddDbContext(options => { options.UseSqlite(Connection); }); + services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); TelemetryEventsCollectorSpy = new TelemetryEventsCollectorSpy(new TelemetryEventsCollector()); services.AddScoped(_ => TelemetryEventsCollectorSpy); diff --git a/application/main/Core/Database/Migrations/20260125035900_Initial.cs b/application/main/Core/Database/Migrations/20260125035900_Initial.cs index 33764df16f..35b04cb9e3 100644 --- a/application/main/Core/Database/Migrations/20260125035900_Initial.cs +++ b/application/main/Core/Database/Migrations/20260125035900_Initial.cs @@ -9,5 +9,17 @@ public sealed class Initial : Migration { protected override void Up(MigrationBuilder migrationBuilder) { + migrationBuilder.CreateTable( + "__data_migrations_history", + table => new + { + migration_id = table.Column("text", nullable: false), + product_version = table.Column("text", nullable: false), + executed_at = table.Column("timestamptz", nullable: false), + execution_time_ms = table.Column("bigint", nullable: false), + summary = 
table.Column("text", nullable: false) + }, + constraints: table => { table.PrimaryKey("pk___data_migrations_history", x => x.migration_id); } + ); } } diff --git a/application/main/Tests/EndpointBaseTest.cs b/application/main/Tests/EndpointBaseTest.cs index 686dd46737..04b6d54098 100644 --- a/application/main/Tests/EndpointBaseTest.cs +++ b/application/main/Tests/EndpointBaseTest.cs @@ -52,18 +52,18 @@ protected EndpointBaseTest() Connection = new SqliteConnection($"Data Source=TestDb_{Guid.NewGuid():N};Mode=Memory;Cache=Shared"); Connection.Open(); - // Configure SQLite to behave more like SQL Server + // Configure SQLite to behave more like PostgreSQL using (var command = Connection.CreateCommand()) { - // Enable foreign key constraints (SQL Server has this by default) + // Enable foreign key constraints (PostgreSQL has this by default) command.CommandText = "PRAGMA foreign_keys = ON;"; command.ExecuteNonQuery(); - // Enable recursive triggers (SQL Server supports nested triggers) + // Enable recursive triggers (PostgreSQL supports nested triggers) command.CommandText = "PRAGMA recursive_triggers = ON;"; command.ExecuteNonQuery(); - // Enforce CHECK constraints (SQL Server enforces these by default) + // Enforce CHECK constraints (PostgreSQL enforces these by default) command.CommandText = "PRAGMA ignore_check_constraints = OFF;"; command.ExecuteNonQuery(); @@ -72,7 +72,7 @@ protected EndpointBaseTest() command.ExecuteNonQuery(); } - Services.AddDbContext(options => { options.UseSqlite(Connection); }); + Services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); Services.AddMainServices(); @@ -106,7 +106,7 @@ protected EndpointBaseTest() { // Replace the default DbContext in the WebApplication to use an in-memory SQLite database services.Remove(services.Single(d => d.ServiceType == typeof(IDbContextOptionsConfiguration))); - services.AddDbContext(options => { options.UseSqlite(Connection); }); + 
services.AddDbContext(options => { options.UseSqlite(Connection).UseSnakeCaseNamingConvention(); }); TelemetryEventsCollectorSpy = new TelemetryEventsCollectorSpy(new TelemetryEventsCollector()); services.AddScoped(_ => TelemetryEventsCollectorSpy); diff --git a/application/shared-kernel/SharedKernel/Configuration/SharedInfrastructureConfiguration.cs b/application/shared-kernel/SharedKernel/Configuration/SharedInfrastructureConfiguration.cs index 7ebd8826ba..65721b90a5 100644 --- a/application/shared-kernel/SharedKernel/Configuration/SharedInfrastructureConfiguration.cs +++ b/application/shared-kernel/SharedKernel/Configuration/SharedInfrastructureConfiguration.cs @@ -1,3 +1,4 @@ +using Azure.Core; using Azure.Extensions.AspNetCore.Configuration.Secrets; using Azure.Identity; using Azure.Monitor.OpenTelemetry.AspNetCore; @@ -10,6 +11,7 @@ using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; +using Npgsql; using OpenTelemetry.Instrumentation.AspNetCore; using OpenTelemetry.Logs; using OpenTelemetry.Metrics; @@ -85,11 +87,27 @@ private IHostApplicationBuilder ConfigureDatabaseContext(string connectionNam ? 
Environment.GetEnvironmentVariable("DATABASE_CONNECTION_STRING") : builder.Configuration.GetConnectionString(connectionName); - builder.Services.AddDbContext(options => - options.UseSqlServer(connectionString, sqlOptions => - sqlOptions.UseCompatibilityLevel(150) // SQL Server 2019 compatibility to avoid native JSON type - ) - ); + if (IsRunningInAzure) + { + var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); + dataSourceBuilder.UsePeriodicPasswordProvider(async (_, cancellationToken) => + { + var token = await DefaultAzureCredential.GetTokenAsync(new TokenRequestContext(["https://ossrdbms-aad.database.windows.net/.default"]), cancellationToken); + return token.Token; + }, TimeSpan.FromMinutes(30), TimeSpan.FromSeconds(5) + ); + var dataSource = dataSourceBuilder.Build(); + builder.Services.AddSingleton(dataSource); + builder.Services.AddDbContext(options => + options.UseNpgsql(dataSource, o => o.MigrationsHistoryTable("__ef_migrations_history")).UseSnakeCaseNamingConvention() + ); + } + else + { + builder.Services.AddDbContext(options => + options.UseNpgsql(connectionString, o => o.MigrationsHistoryTable("__ef_migrations_history")).UseSnakeCaseNamingConvention() + ); + } return builder; } diff --git a/application/shared-kernel/SharedKernel/Database/DataMigrationRunner.cs b/application/shared-kernel/SharedKernel/Database/DataMigrationRunner.cs index 1a5368f221..b31dc915a5 100644 --- a/application/shared-kernel/SharedKernel/Database/DataMigrationRunner.cs +++ b/application/shared-kernel/SharedKernel/Database/DataMigrationRunner.cs @@ -1,17 +1,18 @@ using System.Data; +using System.Security.Cryptography; +using System.Text; using System.Text.RegularExpressions; -using Microsoft.Data.SqlClient; using Microsoft.EntityFrameworkCore; using Microsoft.EntityFrameworkCore.Storage; using Microsoft.Extensions.DependencyInjection; +using Npgsql; namespace SharedKernel.Database; public sealed class DataMigrationRunner(TContext dbContext, IServiceProvider 
serviceProvider, ILogger> logger) where TContext : DbContext { - private const int LockTimeoutSeconds = 300; - private static readonly string LockName = $"DataMigrationLock_{typeof(TContext).Name}"; + private static readonly long LockKey = BitConverter.ToInt64(SHA256.HashData(Encoding.UTF8.GetBytes(typeof(TContext).FullName!))); public async Task RunMigrationsAsync(CancellationToken cancellationToken) { @@ -21,36 +22,17 @@ public async Task RunMigrationsAsync(CancellationToken cancellationToken) return; } - logger.LogInformation("Acquiring an exclusive lock for data migration application. This may take a while if data migrations are already being applied."); + logger.LogInformation("Acquiring an exclusive lock for data migration application. This may take a while if data migrations are already being applied"); - await using var connection = (SqlConnection)dbContext.Database.GetDbConnection(); - await connection.OpenAsync(cancellationToken); + await using var connection = serviceProvider.GetService(typeof(NpgsqlDataSource)) is NpgsqlDataSource npgsqlDataSource + ? 
await npgsqlDataSource.OpenConnectionAsync(cancellationToken) + : (NpgsqlConnection)dbContext.Database.GetDbConnection(); + if (connection.State != ConnectionState.Open) await connection.OpenAsync(cancellationToken); await using var lockCommand = connection.CreateCommand(); - lockCommand.CommandText = "sp_getapplock"; - lockCommand.CommandType = CommandType.StoredProcedure; - lockCommand.Parameters.AddWithValue("@Resource", LockName); - lockCommand.Parameters.AddWithValue("@LockMode", "Exclusive"); - lockCommand.Parameters.AddWithValue("@LockOwner", "Session"); - lockCommand.Parameters.AddWithValue("@LockTimeout", LockTimeoutSeconds * 1000); - - var returnParam = new SqlParameter("@ReturnValue", SqlDbType.Int) { Direction = ParameterDirection.ReturnValue }; - lockCommand.Parameters.Add(returnParam); - + lockCommand.CommandText = "SELECT pg_advisory_lock(@key)"; + lockCommand.Parameters.AddWithValue("key", LockKey); await lockCommand.ExecuteNonQueryAsync(cancellationToken); - var lockResult = (int)returnParam.Value!; - - if (lockResult < 0) - { - var message = lockResult switch - { - -1 => "Timeout waiting for data migration lock after 5 minutes. This may indicate migrations are taking too long or multiple workers are queued.", - -2 => "Data migration lock request was canceled.", - -3 => "Data migration lock request was chosen as deadlock victim.", - _ => $"Failed to acquire data migration lock with error code {lockResult}." 
- }; - throw new InvalidOperationException(message); - } try { @@ -80,12 +62,21 @@ public async Task RunMigrationsAsync(CancellationToken cancellationToken) } finally { - await using var releaseLockCommand = connection.CreateCommand(); - releaseLockCommand.CommandText = "sp_releaseapplock"; - releaseLockCommand.CommandType = CommandType.StoredProcedure; - releaseLockCommand.Parameters.AddWithValue("@Resource", LockName); - releaseLockCommand.Parameters.AddWithValue("@LockOwner", "Session"); - await releaseLockCommand.ExecuteNonQueryAsync(cancellationToken); + try + { + await using var releaseLockCommand = connection.CreateCommand(); + releaseLockCommand.CommandText = "SELECT pg_advisory_unlock(@key)"; + releaseLockCommand.Parameters.AddWithValue("key", LockKey); + var unlocked = (bool)(await releaseLockCommand.ExecuteScalarAsync(CancellationToken.None))!; + if (!unlocked) + { + logger.LogWarning("Advisory lock {LockKey} was not held when attempting to release", LockKey); + } + } + catch (Exception ex) + { + logger.LogWarning(ex, "Failed to release advisory lock {LockKey}", LockKey); + } } } @@ -109,6 +100,11 @@ private List DiscoverDataMigrations() { throw new InvalidOperationException($"Data migration class name '{actualClassName}' must match ID suffix '{expectedClassName}'"); } + + if (migration.Timeout <= TimeSpan.Zero || migration.Timeout > TimeSpan.FromMinutes(20)) + { + throw new InvalidOperationException($"Data migration '{migration.Id}' timeout {migration.Timeout} must be between 1 second and 20 minutes."); + } } return migrations; @@ -117,17 +113,27 @@ private List DiscoverDataMigrations() private async Task EnsureDataMigrationHistoryTableExistsAsync(CancellationToken cancellationToken) { var sql = """ - IF OBJECT_ID(N'[__DataMigrationsHistory]') IS NULL + DO $$ BEGIN - CREATE TABLE [__DataMigrationsHistory] ( - [MigrationId] nvarchar(150) NOT NULL, - [ProductVersion] nvarchar(32) NOT NULL, - [ExecutedAt] datetimeoffset NOT NULL, - [ExecutionTimeMs] bigint 
NOT NULL, - [Summary] nvarchar(max) NOT NULL, - CONSTRAINT [PK___DataMigrationsHistory] PRIMARY KEY ([MigrationId]) - ); - END; + IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = '__DataMigrationsHistory') THEN + ALTER TABLE "__DataMigrationsHistory" RENAME TO __data_migrations_history; + ALTER TABLE __data_migrations_history RENAME COLUMN "MigrationId" TO migration_id; + ALTER TABLE __data_migrations_history RENAME COLUMN "ProductVersion" TO product_version; + ALTER TABLE __data_migrations_history RENAME COLUMN "ExecutedAt" TO executed_at; + ALTER TABLE __data_migrations_history RENAME COLUMN "ExecutionTimeMs" TO execution_time_ms; + ALTER TABLE __data_migrations_history RENAME COLUMN "Summary" TO summary; + ALTER TABLE __data_migrations_history RENAME CONSTRAINT "PK___DataMigrationsHistory" TO pk___data_migrations_history; + END IF; + END $$; + + CREATE TABLE IF NOT EXISTS __data_migrations_history ( + migration_id text NOT NULL, + product_version text NOT NULL, + executed_at timestamptz NOT NULL, + execution_time_ms bigint NOT NULL, + summary text NOT NULL, + CONSTRAINT pk___data_migrations_history PRIMARY KEY (migration_id) + ); """; await dbContext.Database.ExecuteSqlRawAsync(sql, cancellationToken); @@ -136,7 +142,7 @@ CONSTRAINT [PK___DataMigrationsHistory] PRIMARY KEY ([MigrationId]) private async Task> GetExecutedDataMigrationsAsync(CancellationToken cancellationToken) { await using var command = dbContext.Database.GetDbConnection().CreateCommand(); - command.CommandText = "SELECT [MigrationId] FROM [__DataMigrationsHistory]"; + command.CommandText = "SELECT migration_id FROM __data_migrations_history"; command.Transaction = dbContext.Database.CurrentTransaction?.GetDbTransaction(); var executedDataMigrations = new HashSet(); @@ -152,27 +158,47 @@ private async Task> GetExecutedDataMigrationsAsync(CancellationT private async Task ExecuteMigrationAsync(IDataMigration dataMigration, CancellationToken cancellationToken) { - 
logger.LogInformation("Executing data migration: '{MigrationId}'", dataMigration.Id); + logger.LogInformation("Executing data migration: '{MigrationId}' with timeout {Timeout}", dataMigration.Id, dataMigration.Timeout); var stopwatch = Stopwatch.StartNew(); + using var timeoutCancellationTokenSource = new CancellationTokenSource(dataMigration.Timeout); + using var linkedCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken, timeoutCancellationTokenSource.Token); + var linkedCancellationToken = linkedCancellationTokenSource.Token; + + if (dataMigration.ManagesOwnTransactions) + { + var summary = await dataMigration.ExecuteAsync(linkedCancellationToken); + + await using var historyTransaction = await dbContext.Database.BeginTransactionAsync(linkedCancellationToken); + await RecordDataMigrationAsync(dataMigration.Id, stopwatch.ElapsedMilliseconds, summary, linkedCancellationToken); + await historyTransaction.CommitAsync(linkedCancellationToken); + + logger.LogInformation( + "Completed data migration: '{MigrationId}' in {ElapsedMs}ms - {Summary}", + dataMigration.Id, + stopwatch.ElapsedMilliseconds, + summary + ); + return; + } var executionStrategy = dbContext.Database.CreateExecutionStrategy(); await executionStrategy.ExecuteAsync(async () => { - await using var transaction = await dbContext.Database.BeginTransactionAsync(cancellationToken); + await using var transaction = await dbContext.Database.BeginTransactionAsync(linkedCancellationToken); try { - var summary = await dataMigration.ExecuteAsync(cancellationToken); + var summary = await dataMigration.ExecuteAsync(linkedCancellationToken); if (dbContext.ChangeTracker.HasChanges()) { throw new InvalidOperationException($"Data migration '{dataMigration.Id}' has unsaved changes. 
Ensure you call dbContext.SaveChangesAsync() before returning from ExecuteAsync()."); } - await RecordDataMigrationAsync(dataMigration.Id, stopwatch.ElapsedMilliseconds, summary, cancellationToken); + await RecordDataMigrationAsync(dataMigration.Id, stopwatch.ElapsedMilliseconds, summary, linkedCancellationToken); - await transaction.CommitAsync(cancellationToken); + await transaction.CommitAsync(linkedCancellationToken); logger.LogInformation( "Completed data migration: '{MigrationId}' in {ElapsedMs}ms - {Summary}", @@ -184,7 +210,7 @@ await executionStrategy.ExecuteAsync(async () => catch (Exception ex) { logger.LogError(ex, "Failed to execute data migration: '{MigrationId}'", dataMigration.Id); - await transaction.RollbackAsync(cancellationToken); + await transaction.RollbackAsync(CancellationToken.None); throw; } } @@ -197,15 +223,15 @@ private async Task RecordDataMigrationAsync(string migrationId, long elapsedMs, await dbContext.Database.ExecuteSqlRawAsync( """ - INSERT INTO [__DataMigrationsHistory] ([MigrationId], [ProductVersion], [ExecutedAt], [ExecutionTimeMs], [Summary]) + INSERT INTO __data_migrations_history (migration_id, product_version, executed_at, execution_time_ms, summary) VALUES (@MigrationId, @ProductVersion, @ExecutedAt, @ExecutionTimeMs, @Summary); """, [ - new SqlParameter("@MigrationId", migrationId), - new SqlParameter("@ProductVersion", productVersion), - new SqlParameter("@ExecutedAt", DateTimeOffset.UtcNow), - new SqlParameter("@ExecutionTimeMs", elapsedMs), - new SqlParameter("@Summary", summary) + new NpgsqlParameter("@MigrationId", migrationId), + new NpgsqlParameter("@ProductVersion", productVersion), + new NpgsqlParameter("@ExecutedAt", serviceProvider.GetRequiredService().GetUtcNow()), + new NpgsqlParameter("@ExecutionTimeMs", elapsedMs), + new NpgsqlParameter("@Summary", summary) ], cancellationToken ); diff --git a/application/shared-kernel/SharedKernel/Database/IDataMigration.cs 
b/application/shared-kernel/SharedKernel/Database/IDataMigration.cs index c6a1823074..b3d5eb92ab 100644 --- a/application/shared-kernel/SharedKernel/Database/IDataMigration.cs +++ b/application/shared-kernel/SharedKernel/Database/IDataMigration.cs @@ -1,12 +1,21 @@ namespace SharedKernel.Database; -/// Data migration that runs after schema migrations. Tracked in __DataMigrationsHistory table. +/// Data migration that runs after schema migrations. Tracked in __data_migrations_history table. [UsedImplicitly(ImplicitUseTargetFlags.WithInheritors)] public interface IDataMigration { /// Migration ID in format 'YYYYMMDDHHmmss_ClassName'. Must match class name suffix. public string Id { get; } + /// Maximum duration for this migration. Must not exceed 20 minutes. + public TimeSpan Timeout { get; } + + /// + /// When true, the runner skips the transaction wrapper and the migration manages its own commits. Use for large + /// batch operations that need chunked processing. + /// + public bool ManagesOwnTransactions => false; + /// Executes migration, returns summary. Must call dbContext.SaveChangesAsync() first. 
public Task ExecuteAsync(CancellationToken cancellationToken); } diff --git a/application/shared-kernel/SharedKernel/EntityFramework/SharedKernelDbContext.cs b/application/shared-kernel/SharedKernel/EntityFramework/SharedKernelDbContext.cs index 5d019064ea..bd3b303499 100644 --- a/application/shared-kernel/SharedKernel/EntityFramework/SharedKernelDbContext.cs +++ b/application/shared-kernel/SharedKernel/EntityFramework/SharedKernelDbContext.cs @@ -46,16 +46,13 @@ protected override void OnModelCreating(ModelBuilder modelBuilder) // Set pluralized table names for all aggregates foreach (var entityType in modelBuilder.Model.GetEntityTypes()) { - var tableNameAnnotation = entityType.GetAnnotations().FirstOrDefault(a => a.Name == "Relational:TableName"); - if (tableNameAnnotation?.Value is not null) + if (entityType.IsOwned()) { - entityType.SetTableName(tableNameAnnotation.Value.ToString()); - } - else - { - var tableName = entityType.GetTableName()!.Pluralize(); - entityType.SetTableName(tableName); + continue; } + + var tableName = entityType.GetTableName()!.Pluralize(); + entityType.SetTableName(tableName); } // Ensures that all enum properties are stored as strings in the database. 
diff --git a/application/shared-kernel/SharedKernel/SharedKernel.csproj b/application/shared-kernel/SharedKernel/SharedKernel.csproj index f9783e2317..b314707de7 100644 --- a/application/shared-kernel/SharedKernel/SharedKernel.csproj +++ b/application/shared-kernel/SharedKernel/SharedKernel.csproj @@ -17,7 +17,6 @@ - @@ -34,7 +33,8 @@ - + + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/application/shared-kernel/Tests/EntityFramework/MapStronglyTypedStringTests.cs b/application/shared-kernel/Tests/EntityFramework/MapStronglyTypedStringTests.cs index e90a3b947e..f6c1638a96 100644 --- a/application/shared-kernel/Tests/EntityFramework/MapStronglyTypedStringTests.cs +++ b/application/shared-kernel/Tests/EntityFramework/MapStronglyTypedStringTests.cs @@ -40,7 +40,7 @@ public async Task MapStronglyTypedString_WhenSavingEntity_ShouldStoreStringValue // Assert var result = _connection.ExecuteScalar( - "SELECT ExternalId FROM TestAggregates WHERE Id = @id", + "SELECT external_id FROM test_aggregates WHERE id = @id", [new { id = testAggregate.Id }] ); result.Should().Be("ext_abc123"); @@ -51,13 +51,13 @@ public async Task MapStronglyTypedString_WhenReadingEntity_ShouldDeserializeCorr { // Arrange const long id = 123; - _connection.Insert("TestAggregates", + _connection.Insert("test_aggregates", [ - ("Id", id), - ("Name", "Test"), - ("Status", "Pending"), - ("ExternalId", "ext_xyz789"), - ("CreatedAt", DateTime.UtcNow.ToString("O")) + ("id", id), + ("name", "Test"), + ("status", "Pending"), + ("external_id", "ext_xyz789"), + ("created_at", DateTime.UtcNow.ToString("O")) ] ); @@ -77,13 +77,13 @@ public async Task MapStronglyTypedString_WhenQueryingByStringId_ShouldFindEntity { // Arrange var externalId = ExternalId.NewId("ext_findme"); - _connection.Insert("TestAggregates", + _connection.Insert("test_aggregates", [ - ("Id", 456L), - ("Name", "Test"), - ("Status", "Pending"), - ("ExternalId", externalId.Value), - ("CreatedAt", 
DateTime.UtcNow.ToString("O")) + ("id", 456L), + ("name", "Test"), + ("status", "Pending"), + ("external_id", externalId.Value), + ("created_at", DateTime.UtcNow.ToString("O")) ] ); diff --git a/application/shared-kernel/Tests/EntityFramework/UseStringForEnumsTests.cs b/application/shared-kernel/Tests/EntityFramework/UseStringForEnumsTests.cs index 10b09c8ee5..dfbe1c3499 100644 --- a/application/shared-kernel/Tests/EntityFramework/UseStringForEnumsTests.cs +++ b/application/shared-kernel/Tests/EntityFramework/UseStringForEnumsTests.cs @@ -40,7 +40,7 @@ public async Task UseStringForEnums_WhenSavingEntity_ShouldStoreEnumAsString() // Assert - Query the raw database to verify enum is stored as string var result = _connection.ExecuteScalar( - "SELECT Status FROM TestAggregates WHERE Id = @id", + "SELECT status FROM test_aggregates WHERE id = @id", [new { id = testAggregate.Id }] ); result.Should().Be("Active"); @@ -59,7 +59,7 @@ public async Task UseStringForEnums_WhenSavingEntityWithNullableEnum_ShouldStore // Assert - Query the raw database to verify nullable enum is stored as string var result = _connection.ExecuteScalar( - "SELECT NullableStatus FROM TestAggregates WHERE Id = @id", + "SELECT nullable_status FROM test_aggregates WHERE id = @id", [new { id = testAggregate.Id }] ); result.Should().Be("Completed"); @@ -78,7 +78,7 @@ public async Task UseStringForEnums_WhenSavingEntityWithNullEnum_ShouldStoreNull // Assert - Query the raw database to verify null is stored var result = _connection.ExecuteScalar( - "SELECT NullableStatus FROM TestAggregates WHERE Id = @id", + "SELECT nullable_status FROM test_aggregates WHERE id = @id", [new { id = testAggregate.Id }] ); result.Should().BeNull(); diff --git a/application/shared-kernel/Tests/Persistence/SqliteConnectionExtensions.cs b/application/shared-kernel/Tests/Persistence/SqliteConnectionExtensions.cs index 26a759cf61..3c19fb1b18 100644 --- a/application/shared-kernel/Tests/Persistence/SqliteConnectionExtensions.cs 
+++ b/application/shared-kernel/Tests/Persistence/SqliteConnectionExtensions.cs @@ -31,13 +31,13 @@ public T ExecuteScalar(string sql, object[] parameters) public bool RowExists(string tableName, string id) { object[] parameters = [new { id }]; - return connection.ExecuteScalar($"SELECT COUNT(*) FROM {tableName} WHERE Id = @id", parameters) == 1; + return connection.ExecuteScalar($"SELECT COUNT(*) FROM {tableName} WHERE id = @id", parameters) == 1; } public bool RowExists(string tableName, long id) { object[] parameters = [new { id }]; - return connection.ExecuteScalar($"SELECT COUNT(*) FROM {tableName} WHERE Id = @id", parameters) == 1; + return connection.ExecuteScalar($"SELECT COUNT(*) FROM {tableName} WHERE id = @id", parameters) == 1; } public void Insert(string tableName, (string, object?)[] columns) @@ -123,7 +123,7 @@ public void Update(string tableName, string idColumnName, object idValue, (strin public void Delete(string tableName, string id) { - using var command = new SqliteCommand($"DELETE FROM {tableName} WHERE Id = @id", connection); + using var command = new SqliteCommand($"DELETE FROM {tableName} WHERE id = @id", connection); command.Parameters.AddWithValue("@id", id); command.ExecuteNonQuery(); } diff --git a/application/shared-kernel/Tests/TestEntities/SqliteInMemoryDbContextFactory.cs b/application/shared-kernel/Tests/TestEntities/SqliteInMemoryDbContextFactory.cs index ca985e8ddc..3ef3fb341c 100644 --- a/application/shared-kernel/Tests/TestEntities/SqliteInMemoryDbContextFactory.cs +++ b/application/shared-kernel/Tests/TestEntities/SqliteInMemoryDbContextFactory.cs @@ -35,6 +35,6 @@ public T CreateContext() private DbContextOptions CreateOptions() { - return new DbContextOptionsBuilder().UseSqlite(_sqliteConnection).Options; + return new DbContextOptionsBuilder().UseSqlite(_sqliteConnection).UseSnakeCaseNamingConvention().Options; } } diff --git a/cloud-infrastructure/README.md b/cloud-infrastructure/README.md index 164910e50b..4379817c89 
100644 --- a/cloud-infrastructure/README.md +++ b/cloud-infrastructure/README.md @@ -11,7 +11,7 @@ Please follow the simple instructions in [Getting started](/README.md#setting-up ## Folder structure - `environment`: Each environment (like `Staging` and `Production`) has resources that are shared between clusters, e.g., Azure Log Analytics workspace and Application Insights. This allows for central tracking and monitoring across clusters. No Personally Identifiable Information (PII) is tracked, which ensures compliance with data protection laws. See the [`environment/main-environment.bicep`](/cloud-infrastructure/environment/main-environment.bicep). -- `cluster`: Scripts to deploy a cluster into clearly named resource groups like `ppdemo-stage-weu`, `ppdemo-prod-weu`, and `ppdemo-prod-eus2`. A cluster has its own Azure Container Apps environment (managed Kubernetes), SQL Server, Azure Blob Storage, etc. Tenants (a.k.a. a customer) are created in a dedicated cluster that contains all data belonging to that tenant. This ensures compliance with data protection laws like GDPR, CCPA, PIPEDA, APPs, etc., through geo-isolation. See the [`cluster/main-cluster.bicep`](/cloud-infrastructure/cluster/main-cluster.bicep). +- `cluster`: Scripts to deploy a cluster into clearly named resource groups like `ppdemo-stage-weu`, `ppdemo-prod-weu`, and `ppdemo-prod-eus2`. A cluster has its own Azure Container Apps environment (managed Kubernetes), PostgreSQL, Azure Blob Storage, etc. Tenants (a.k.a. a customer) are created in a dedicated cluster that contains all data belonging to that tenant. This ensures compliance with data protection laws like GDPR, CCPA, PIPEDA, APPs, etc., through geo-isolation. See the [`cluster/main-cluster.bicep`](/cloud-infrastructure/cluster/main-cluster.bicep). - `modules`: Each Azure Resource is created by a separate Bicep module file, ensuring a modular, reusable, and manageable infrastructure. 
@@ -29,7 +29,7 @@ Please follow the simple instructions in [Getting started](/README.md#setting-up ## Folder structure - `environment`: Each environment (like `Staging` and `Production`) has resources that are shared between clusters, e.g., Azure Log Analytics workspace and Application Insights. This allows for central tracking and monitoring across clusters. No Personally Identifiable Information (PII) is tracked, which ensures compliance with data protection laws. See the [`environment/main-environment.bicep`](/cloud-infrastructure/environment/main-environment.bicep). -- `cluster`: Scripts to deploy a cluster into clearly named resource groups like `ppdemo-stage-weu`, `ppdemo-prod-weu`, and `ppdemo-prod-eus2`. A cluster has its own Azure Container Apps environment (managed Kubernetes), SQL Server, Azure Blob Storage, etc. Tenants (a.k.a. a customer) are created in a dedicated cluster that contains all data belonging to that tenant. This ensures compliance with data protection laws like GDPR, CCPA, PIPEDA, APPs, etc., through geo-isolation. See the [`cluster/main-cluster.bicep`](/cloud-infrastructure/cluster/main-cluster.bicep). +- `cluster`: Scripts to deploy a cluster into clearly named resource groups like `ppdemo-stage-weu`, `ppdemo-prod-weu`, and `ppdemo-prod-eus2`. A cluster has its own Azure Container Apps environment (managed Kubernetes), PostgreSQL, Azure Blob Storage, etc. Tenants (a.k.a. a customer) are created in a dedicated cluster that contains all data belonging to that tenant. This ensures compliance with data protection laws like GDPR, CCPA, PIPEDA, APPs, etc., through geo-isolation. See the [`cluster/main-cluster.bicep`](/cloud-infrastructure/cluster/main-cluster.bicep). - `modules`: Each Azure Resource is created by a separate Bicep module file, ensuring a modular, reusable, and manageable infrastructure. 
@@ -48,8 +48,8 @@ There are a couple of exceptions: Examples of cluster-specific resources: - Resource Group: `ppdemo-stage-weu`, `ppdemo-prod-eus2` -- SQL Server: `ppdemo-stage-weu`, `ppdemo-prod-eus2` -- SQL Server database: `main`, `account`, `back-office` +- PostgreSQL: `ppdemo-stage-weu`, `ppdemo-prod-eus2` +- PostgreSQL database: `main`, `account`, `back-office` - Azure Container App Environment: `ppdemo-stage-weu`, `ppdemo-prod-eus2` - Azure Container Apps: `main-api`, `account-api`, `back-office-worker` - Managed Identity: `ppdemo-stage-weu-main`, `ppdemo-stage-weu-account`, `ppdemo-prod-eus2-back-office` diff --git a/cloud-infrastructure/cluster/add-postgres-admin.sh b/cloud-infrastructure/cluster/add-postgres-admin.sh new file mode 100755 index 0000000000..c3825dfbc8 --- /dev/null +++ b/cloud-infrastructure/cluster/add-postgres-admin.sh @@ -0,0 +1,18 @@ +set -e + +UNIQUE_PREFIX=$1 +ENVIRONMENT=$2 +CLUSTER_LOCATION_ACRONYM=$3 +POSTGRES_ADMIN_OBJECT_ID=$4 + +CLUSTER_RESOURCE_GROUP_NAME=$UNIQUE_PREFIX-$ENVIRONMENT-$CLUSTER_LOCATION_ACRONYM +POSTGRES_SERVER_NAME=$CLUSTER_RESOURCE_GROUP_NAME + +echo "$(date +"%Y-%m-%dT%H:%M:%S") Adding Entra ID group $POSTGRES_ADMIN_OBJECT_ID as admin on PostgreSQL server $POSTGRES_SERVER_NAME" + +az postgres flexible-server microsoft-entra-admin create \ + --resource-group $CLUSTER_RESOURCE_GROUP_NAME \ + --server-name $POSTGRES_SERVER_NAME \ + --display-name "PostgreSQL Admins" \ + --object-id $POSTGRES_ADMIN_OBJECT_ID \ + --type Group diff --git a/cloud-infrastructure/cluster/deploy-cluster.sh b/cloud-infrastructure/cluster/deploy-cluster.sh index c0f1f2b433..2bf396840f 100755 --- a/cloud-infrastructure/cluster/deploy-cluster.sh +++ b/cloud-infrastructure/cluster/deploy-cluster.sh @@ -4,7 +4,7 @@ UNIQUE_PREFIX=$1 ENVIRONMENT=$2 CLUSTER_LOCATION=$3 CLUSTER_LOCATION_ACRONYM=$4 -SQL_ADMIN_OBJECT_ID=$5 +POSTGRES_ADMIN_OBJECT_ID=$5 DOMAIN_NAME=$6 get_active_version() @@ -29,7 +29,6 @@ export UNIQUE_PREFIX export ENVIRONMENT 
export LOCATION=$CLUSTER_LOCATION export DOMAIN_NAME -export SQL_ADMIN_OBJECT_ID export GOOGLE_OAUTH_CLIENT_ID export GOOGLE_OAUTH_CLIENT_SECRET export STRIPE_PUBLISHABLE_KEY @@ -97,7 +96,7 @@ then exit 1 fi - # Extract the ID of the Managed Identities, which can be used to grant access to SQL Database + # Extract the ID of the Managed Identities, which can be used to grant access to PostgreSQL databases ACCOUNT_IDENTITY_CLIENT_ID=$(echo "$cleaned_output" | jq -r '.properties.outputs.accountIdentityClientId.value') BACK_OFFICE_IDENTITY_CLIENT_ID=$(echo "$cleaned_output" | jq -r '.properties.outputs.backOfficeIdentityClientId.value') MAIN_IDENTITY_CLIENT_ID=$(echo "$cleaned_output" | jq -r '.properties.outputs.mainIdentityClientId.value') @@ -106,6 +105,7 @@ then echo "BACK_OFFICE_IDENTITY_CLIENT_ID=$BACK_OFFICE_IDENTITY_CLIENT_ID" >> $GITHUB_OUTPUT echo "MAIN_IDENTITY_CLIENT_ID=$MAIN_IDENTITY_CLIENT_ID" >> $GITHUB_OUTPUT else + . ./add-postgres-admin.sh $UNIQUE_PREFIX $ENVIRONMENT $CLUSTER_LOCATION_ACRONYM $POSTGRES_ADMIN_OBJECT_ID . ./grant-database-permissions.sh $UNIQUE_PREFIX $ENVIRONMENT $CLUSTER_LOCATION_ACRONYM 'account' $ACCOUNT_IDENTITY_CLIENT_ID . ./grant-database-permissions.sh $UNIQUE_PREFIX $ENVIRONMENT $CLUSTER_LOCATION_ACRONYM 'back-office' $BACK_OFFICE_IDENTITY_CLIENT_ID . ./grant-database-permissions.sh $UNIQUE_PREFIX $ENVIRONMENT $CLUSTER_LOCATION_ACRONYM 'main' $MAIN_IDENTITY_CLIENT_ID diff --git a/cloud-infrastructure/cluster/firewall.sh b/cloud-infrastructure/cluster/firewall.sh index 7f3c9d2c7e..81af5cea2e 100644 --- a/cloud-infrastructure/cluster/firewall.sh +++ b/cloud-infrastructure/cluster/firewall.sh @@ -1,11 +1,15 @@ -IP_ADDRESS=$(curl -s https://api.ipify.org) -FIREWALL_RULE_NAME="GitHub Action Workflows - ${SQL_DATABASE_NAME} - Only active when deploying" +IP_ADDRESS=$(curl -sf https://api.ipify.org) +if [[ -z "$IP_ADDRESS" ]] || ! 
[[ "$IP_ADDRESS" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "ERROR: Failed to resolve public IP address" + exit 1 +fi +FIREWALL_RULE_NAME="github-action-${DATABASE_NAME}" if [[ "$1" == "open" ]] then - echo "$(date +"%Y-%m-%dT%H:%M:%S") Add the IP $IP_ADDRESS to the SQL Server firewall on server $SQL_SERVER_NAME for database $SQL_DATABASE_NAME" - az sql server firewall-rule create --resource-group $CLUSTER_RESOURCE_GROUP_NAME --server $SQL_SERVER_NAME --name "$FIREWALL_RULE_NAME" --start-ip-address $IP_ADDRESS --end-ip-address $IP_ADDRESS + echo "$(date +"%Y-%m-%dT%H:%M:%S") Add the IP $IP_ADDRESS to the PostgreSQL server firewall on server $POSTGRES_SERVER_NAME for database $DATABASE_NAME" + az postgres flexible-server firewall-rule create --resource-group $CLUSTER_RESOURCE_GROUP_NAME --name $POSTGRES_SERVER_NAME --rule-name "$FIREWALL_RULE_NAME" --start-ip-address $IP_ADDRESS --end-ip-address $IP_ADDRESS else - echo "$(date +"%Y-%m-%dT%H:%M:%S") Delete the IP $IP_ADDRESS from the SQL Server firewall on server $SQL_SERVER_NAME for database $SQL_DATABASE_NAME" - az sql server firewall-rule delete --resource-group $CLUSTER_RESOURCE_GROUP_NAME --server $SQL_SERVER_NAME --name "$FIREWALL_RULE_NAME" + echo "$(date +"%Y-%m-%dT%H:%M:%S") Delete the IP $IP_ADDRESS from the PostgreSQL server firewall on server $POSTGRES_SERVER_NAME for database $DATABASE_NAME" + az postgres flexible-server firewall-rule delete --resource-group $CLUSTER_RESOURCE_GROUP_NAME --name $POSTGRES_SERVER_NAME --rule-name "$FIREWALL_RULE_NAME" --yes fi diff --git a/cloud-infrastructure/cluster/grant-database-permissions.sh b/cloud-infrastructure/cluster/grant-database-permissions.sh index f5419e4d5a..a38ee649aa 100755 --- a/cloud-infrastructure/cluster/grant-database-permissions.sh +++ b/cloud-infrastructure/cluster/grant-database-permissions.sh @@ -1,41 +1,37 @@ +set -e + UNIQUE_PREFIX=$1 ENVIRONMENT=$2 CLUSTER_LOCATION_ACRONYM=$3 -SQL_DATABASE_NAME=$4 +DATABASE_NAME=$4 
MANAGED_IDENTITY_CLIENT_ID=$5 CLUSTER_RESOURCE_GROUP_NAME=$UNIQUE_PREFIX-$ENVIRONMENT-$CLUSTER_LOCATION_ACRONYM MANAGED_IDENTITY_NAME=$CLUSTER_RESOURCE_GROUP_NAME-$4 -SQL_SERVER_NAME=$CLUSTER_RESOURCE_GROUP_NAME -SQL_SERVER=$SQL_SERVER_NAME.database.windows.net - -cd "$(dirname "${BASH_SOURCE[0]}")" -# Export SQL_DATABASE_NAME for firewall.sh to use -export SQL_DATABASE_NAME=$SQL_DATABASE_NAME -trap '. ./firewall.sh close' EXIT # Ensure that the firewall is closed no matter if other commands fail -. ./firewall.sh open +POSTGRES_SERVER_NAME=$CLUSTER_RESOURCE_GROUP_NAME +POSTGRES_HOST=$POSTGRES_SERVER_NAME.postgres.database.azure.com -# Convert the ClientId of the Managed Identity to the binary version. The following bash script is equivalent to this PowerShell: -# $SID = "0x" + [System.BitConverter]::ToString(([guid]$SID).ToByteArray()).Replace("-", "") -SID=$(echo $MANAGED_IDENTITY_CLIENT_ID | tr 'a-f' 'A-F' | tr -d '-') # Convert to uppercase and remove hyphens -SID=$(awk -v id="$SID" 'BEGIN { - printf "0x%s%s%s%s\n", - substr(id,7,2) substr(id,5,2) substr(id,3,2) substr(id,1,2), - substr(id,11,2) substr(id,9,2), - substr(id,15,2) substr(id,13,2), - substr(id,17) -}') # Reverse the byte order for the first three sections of the GUID and concatenate +ACCESS_TOKEN=$(az account get-access-token --resource-type oss-rdbms --query accessToken --output tsv) +ENTRA_USER=$(az postgres flexible-server microsoft-entra-admin list --resource-group $CLUSTER_RESOURCE_GROUP_NAME --server-name $POSTGRES_SERVER_NAME --query "[0].principalName" --output tsv) -echo "$(date +"%Y-%m-%dT%H:%M:%S") Granting $MANAGED_IDENTITY_NAME (ID: $SID) in Resource group $CLUSTER_RESOURCE_GROUP_NAME permissions on $SQL_SERVER/$SQL_DATABASE_NAME database" +echo "$(date +"%Y-%m-%dT%H:%M:%S") Granting $MANAGED_IDENTITY_NAME permissions on $POSTGRES_HOST/$DATABASE_NAME" -# Execute the SQL script using mssql-scripter. Pass the script as a heredoc to sqlcmd to allow for complex SQL. 
-sqlcmd -S $SQL_SERVER -d $SQL_DATABASE_NAME --authentication-method=ActiveDirectoryDefault --exit-on-error << EOF -IF NOT EXISTS (SELECT [name] FROM [sys].[database_principals] WHERE [name] = '$MANAGED_IDENTITY_NAME' AND [type] = 'E') +PGPASSWORD=$ACCESS_TOKEN psql -v ON_ERROR_STOP=1 "host=$POSTGRES_HOST dbname=postgres user='$ENTRA_USER' sslmode=verify-full sslrootcert=system" << EOF +DO \$\$ BEGIN - CREATE USER [$MANAGED_IDENTITY_NAME] WITH SID = $SID, TYPE = E; - ALTER ROLE db_datareader ADD MEMBER [$MANAGED_IDENTITY_NAME]; - ALTER ROLE db_datawriter ADD MEMBER [$MANAGED_IDENTITY_NAME]; - ALTER ROLE db_ddladmin ADD MEMBER [$MANAGED_IDENTITY_NAME]; + IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$MANAGED_IDENTITY_NAME') THEN + PERFORM pgaadauth_create_principal('$MANAGED_IDENTITY_NAME', false, false); + END IF; END -GO +\$\$; +EOF + +PGPASSWORD=$ACCESS_TOKEN psql -v ON_ERROR_STOP=1 "host=$POSTGRES_HOST dbname=$DATABASE_NAME user='$ENTRA_USER' sslmode=verify-full sslrootcert=system" << EOF +CREATE EXTENSION IF NOT EXISTS pg_stat_statements; +GRANT CONNECT ON DATABASE "$DATABASE_NAME" TO "$MANAGED_IDENTITY_NAME"; +GRANT USAGE ON SCHEMA public TO "$MANAGED_IDENTITY_NAME"; +GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO "$MANAGED_IDENTITY_NAME"; +GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO "$MANAGED_IDENTITY_NAME"; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO "$MANAGED_IDENTITY_NAME"; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO "$MANAGED_IDENTITY_NAME"; EOF diff --git a/cloud-infrastructure/cluster/main-cluster.bicep b/cloud-infrastructure/cluster/main-cluster.bicep index 5742a57653..63fd4e9ff9 100644 --- a/cloud-infrastructure/cluster/main-cluster.bicep +++ b/cloud-infrastructure/cluster/main-cluster.bicep @@ -6,7 +6,6 @@ param globalResourceGroupName string param environment string param containerRegistryName string param 
domainName string -param sqlAdminObjectId string param appGatewayVersion string param accountVersion string param backOfficeVersion string @@ -46,14 +45,6 @@ resource existingLogAnalyticsWorkspace 'Microsoft.OperationalInsights/workspaces name: resourceNamePrefix } -var subnetId = resourceId( - subscription().subscriptionId, - clusterResourceGroupName, - 'Microsoft.Network/virtualNetworks/subnets', - clusterResourceGroupName, - 'subnet' -) - var diagnosticStorageAccountName = '${storageAccountUniquePrefix}diagnostic' module diagnosticStorageAccount '../modules/storage-account.bicep' = { scope: clusterResourceGroup @@ -66,6 +57,15 @@ module diagnosticStorageAccount '../modules/storage-account.bicep' = { } } +module diagnosticStorageRetention '../modules/storage-account-retention.bicep' = { + scope: clusterResourceGroup + name: '${clusterResourceGroupName}-diagnostic-storage-retention' + params: { + storageAccountName: diagnosticStorageAccount.outputs.name + retentionDays: 90 + } +} + module virtualNetwork '../modules/virtual-network.bicep' = { scope: clusterResourceGroup name: '${clusterResourceGroupName}-virtual-network' @@ -84,11 +84,10 @@ module containerAppsEnvironment '../modules/container-apps-environment.bicep' = location: location name: clusterResourceGroupName tags: tags - subnetId: subnetId + subnetId: virtualNetwork.outputs.containerAppsSubnetId globalResourceGroupName: globalResourceGroupName logAnalyticsWorkspaceName: resourceNamePrefix } - dependsOn: [virtualNetwork] } module keyVault '../modules/key-vault.bicep' = { @@ -99,11 +98,10 @@ module keyVault '../modules/key-vault.bicep' = { name: clusterResourceGroupName tags: tags tenantId: subscription().tenantId - subnetId: subnetId + subnetId: virtualNetwork.outputs.containerAppsSubnetId storageAccountId: diagnosticStorageAccount.outputs.storageAccountId workspaceId: existingLogAnalyticsWorkspace.id } - dependsOn: [virtualNetwork] } module googleOAuthSecrets '../modules/key-vault-secrets.bicep' = if 
(!empty(googleOAuthClientId) && !empty(googleOAuthClientSecret)) { @@ -143,30 +141,19 @@ module communicationService '../modules/communication-services.bicep' = { } } -module microsoftSqlServer '../modules/microsoft-sql-server.bicep' = { +module postgresServer '../modules/postgresql-flexible-server.bicep' = { scope: clusterResourceGroup - name: '${clusterResourceGroupName}-microsoft-sql-server' + name: '${clusterResourceGroupName}-postgresql-server' params: { location: location name: clusterResourceGroupName tags: tags - subnetId: subnetId tenantId: subscription().tenantId - sqlAdminObjectId: sqlAdminObjectId - } - dependsOn: [virtualNetwork] -} - -module microsoftSqlDerverDiagnosticConfiguration '../modules/microsoft-sql-server-diagnostic.bicep' = { - scope: clusterResourceGroup - name: '${clusterResourceGroupName}-microsoft-sql-server-diagnostic' - params: { - diagnosticStorageAccountName: diagnosticStorageAccountName - microsoftSqlServerName: clusterResourceGroupName - dianosticStorageAccountBlobEndpoint: diagnosticStorageAccount.outputs.blobEndpoint - dianosticStorageAccountSubscriptionId: subscription().subscriptionId + subnetId: virtualNetwork.outputs.privateEndpointSubnetId + virtualNetworkId: virtualNetwork.outputs.virtualNetworkId + isProduction: environment == 'prod' + diagnosticStorageAccountId: diagnosticStorageAccount.outputs.storageAccountId } - dependsOn: [microsoftSqlServer] } var isCustomDomainSet = domainName != '' @@ -192,16 +179,13 @@ module accountIdentity '../modules/user-assigned-managed-identity.bicep' = { } } -module accountDatabase '../modules/microsoft-sql-database.bicep' = { - name: '${clusterResourceGroupName}-account-sql-database' +module accountDatabase '../modules/postgresql-flexible-database.bicep' = { + name: '${clusterResourceGroupName}-account-postgres-database' scope: clusterResourceGroup params: { - sqlServerName: clusterResourceGroupName + serverName: postgresServer.outputs.serverName databaseName: 'account' - location: 
location - tags: tags } - dependsOn: [microsoftSqlServer] } var accountStorageAccountName = '${storageAccountUniquePrefix}account' @@ -213,7 +197,7 @@ module accountStorageAccount '../modules/storage-account.bicep' = { name: accountStorageAccountName tags: tags sku: 'Standard_GRS' - userAssignedIdentityName: accountIdentityName + userAssignedIdentityName: accountIdentity.outputs.name containers: [ { name: 'avatars' @@ -225,7 +209,6 @@ module accountStorageAccount '../modules/storage-account.bicep' = { } ] } - dependsOn: [accountIdentity] } var accountEnvironmentVariables = [ @@ -239,7 +222,7 @@ var accountEnvironmentVariables = [ } { name: 'DATABASE_CONNECTION_STRING' - value: '${accountDatabase.outputs.connectionString};User Id=${accountIdentity.outputs.clientId};' + value: '${accountDatabase.outputs.connectionString};Username=${accountIdentityName}' } { name: 'KEYVAULT_URL' @@ -338,16 +321,13 @@ module backOfficeIdentity '../modules/user-assigned-managed-identity.bicep' = { } } -module backOfficeDatabase '../modules/microsoft-sql-database.bicep' = { - name: '${clusterResourceGroupName}-back-office-sql-database' +module backOfficeDatabase '../modules/postgresql-flexible-database.bicep' = { + name: '${clusterResourceGroupName}-back-office-postgres-database' scope: clusterResourceGroup params: { - sqlServerName: clusterResourceGroupName + serverName: postgresServer.outputs.serverName databaseName: 'back-office' - location: location - tags: tags } - dependsOn: [microsoftSqlServer] } var backOfficeStorageAccountName = '${storageAccountUniquePrefix}backoffice' @@ -359,9 +339,8 @@ module backOfficeStorageAccount '../modules/storage-account.bicep' = { name: backOfficeStorageAccountName tags: tags sku: 'Standard_GRS' - userAssignedIdentityName: backOfficeIdentityName + userAssignedIdentityName: backOfficeIdentity.outputs.name } - dependsOn: [backOfficeIdentity] } var backOfficeEnvironmentVariables = [ @@ -375,7 +354,7 @@ var backOfficeEnvironmentVariables = [ } { name: 
'DATABASE_CONNECTION_STRING' - value: '${backOfficeDatabase.outputs.connectionString};User Id=${backOfficeIdentity.outputs.clientId};' + value: '${backOfficeDatabase.outputs.connectionString};Username=${backOfficeIdentityName}' } { name: 'KEYVAULT_URL' @@ -466,16 +445,13 @@ module mainIdentity '../modules/user-assigned-managed-identity.bicep' = { } } -module mainDatabase '../modules/microsoft-sql-database.bicep' = { - name: '${clusterResourceGroupName}-main-sql-database' +module mainDatabase '../modules/postgresql-flexible-database.bicep' = { + name: '${clusterResourceGroupName}-main-postgres-database' scope: clusterResourceGroup params: { - sqlServerName: clusterResourceGroupName + serverName: postgresServer.outputs.serverName databaseName: 'main' - location: location - tags: tags } - dependsOn: [microsoftSqlServer] } var mainStorageAccountName = '${storageAccountUniquePrefix}main' @@ -487,9 +463,8 @@ module mainStorageAccount '../modules/storage-account.bicep' = { name: mainStorageAccountName tags: tags sku: 'Standard_GRS' - userAssignedIdentityName: mainIdentityName + userAssignedIdentityName: mainIdentity.outputs.name } - dependsOn: [mainIdentity] } var mainEnvironmentVariables = [ @@ -503,7 +478,7 @@ var mainEnvironmentVariables = [ } { name: 'DATABASE_CONNECTION_STRING' - value: '${mainDatabase.outputs.connectionString};User Id=${mainIdentity.outputs.clientId};' + value: '${mainDatabase.outputs.connectionString};Username=${mainIdentityName}' } { name: 'KEYVAULT_URL' @@ -663,10 +638,9 @@ module appGatewayAccountStorageBlobDataReaderRoleAssignment '../modules/role-ass scope: clusterResourceGroup name: '${clusterResourceGroupName}-app-gateway-account-blob-reader' params: { - storageAccountName: accountStorageAccountName - userAssignedIdentityName: appGatewayIdentityName + storageAccountName: accountStorageAccount.outputs.name + userAssignedIdentityName: appGatewayIdentity.outputs.name } - dependsOn: [appGateway, accountStorageAccount] } output 
accountIdentityClientId string = accountIdentity.outputs.clientId diff --git a/cloud-infrastructure/cluster/main-cluster.bicepparam b/cloud-infrastructure/cluster/main-cluster.bicepparam index 1ebb98829d..41810adea3 100644 --- a/cloud-infrastructure/cluster/main-cluster.bicepparam +++ b/cloud-infrastructure/cluster/main-cluster.bicepparam @@ -7,7 +7,6 @@ param globalResourceGroupName = readEnvironmentVariable('GLOBAL_RESOURCE_GROUP_N param environment = readEnvironmentVariable('ENVIRONMENT') param containerRegistryName = readEnvironmentVariable('CONTAINER_REGISTRY_NAME') param domainName = readEnvironmentVariable('DOMAIN_NAME', '') -param sqlAdminObjectId = readEnvironmentVariable('SQL_ADMIN_OBJECT_ID') param appGatewayVersion = readEnvironmentVariable('APP_GATEWAY_VERSION') param accountVersion = readEnvironmentVariable('ACCOUNT_VERSION') param backOfficeVersion = readEnvironmentVariable('BACK_OFFICE_VERSION') diff --git a/cloud-infrastructure/environment/main-environment.bicep b/cloud-infrastructure/environment/main-environment.bicep index e707317755..517a41c390 100644 --- a/cloud-infrastructure/environment/main-environment.bicep +++ b/cloud-infrastructure/environment/main-environment.bicep @@ -31,10 +31,9 @@ module productionServicePrincipalDataImporter '../modules/role-assignments-conta name: '${globalResourceGroupName}-production-sp-data-importer' scope: resourceGroup(globalResourceGroup.name) params: { - containerRegistryName: containerRegistryName + containerRegistryName: containerRegistry.outputs.name principalId: productionServicePrincipalObjectId } - dependsOn: [containerRegistry] } module logAnalyticsWorkspace '../modules/log-analytics-workspace.bicep' = { diff --git a/cloud-infrastructure/modules/container-apps-environment.bicep b/cloud-infrastructure/modules/container-apps-environment.bicep index a89ace1134..61979a113f 100644 --- a/cloud-infrastructure/modules/container-apps-environment.bicep +++ 
b/cloud-infrastructure/modules/container-apps-environment.bicep @@ -21,10 +21,13 @@ resource containerAppsEnvironment 'Microsoft.App/managedEnvironments@2025-07-01' vnetConfiguration: { internal: false infrastructureSubnetId: subnetId - dockerBridgeCidr: '10.2.0.1/16' - platformReservedCidr: '10.1.0.0/16' - platformReservedDnsIP: '10.1.0.2' } + workloadProfiles: [ + { + name: 'Consumption' + workloadProfileType: 'Consumption' + } + ] appLogsConfiguration: { destination: 'log-analytics' logAnalyticsConfiguration: { diff --git a/cloud-infrastructure/modules/container-registry.bicep b/cloud-infrastructure/modules/container-registry.bicep index aa7b532819..fb528a4fed 100644 --- a/cloud-infrastructure/modules/container-registry.bicep +++ b/cloud-infrastructure/modules/container-registry.bicep @@ -28,3 +28,5 @@ resource containerRegistry 'Microsoft.ContainerRegistry/registries@2025-11-01' = } } } + +output name string = containerRegistry.name diff --git a/cloud-infrastructure/modules/microsoft-sql-database.bicep b/cloud-infrastructure/modules/microsoft-sql-database.bicep deleted file mode 100644 index af14ed51c4..0000000000 --- a/cloud-infrastructure/modules/microsoft-sql-database.bicep +++ /dev/null @@ -1,21 +0,0 @@ -param sqlServerName string -param databaseName string -param location string -param tags object - -resource sqlDatabase 'Microsoft.Sql/servers/databases@2023-08-01' = { - name: '${sqlServerName}/${databaseName}' - location: location - tags: tags - sku: { - name: 'Basic' - tier: 'Basic' - capacity: 5 - } - properties: { - collation: 'SQL_Latin1_General_CP1_CI_AS' - zoneRedundant: false - } -} - -output connectionString string = 'Server=tcp:${sqlServerName}${environment().suffixes.sqlServerHostname},1433;Initial Catalog=${databaseName};Authentication=Active Directory Default;TrustServerCertificate=True' diff --git a/cloud-infrastructure/modules/microsoft-sql-server-diagnostic.bicep b/cloud-infrastructure/modules/microsoft-sql-server-diagnostic.bicep deleted 
file mode 100644 index 385ecfe394..0000000000 --- a/cloud-infrastructure/modules/microsoft-sql-server-diagnostic.bicep +++ /dev/null @@ -1,57 +0,0 @@ -param diagnosticStorageAccountName string -param microsoftSqlServerName string -param dianosticStorageAccountSubscriptionId string -param dianosticStorageAccountBlobEndpoint string - -resource existingMicrosoftSqlServer 'Microsoft.Sql/servers@2023-08-01' existing = { - name: microsoftSqlServerName -} - -var contributorPrincipalId = existingMicrosoftSqlServer.identity.principalId - -module diagnosticStorageBlobDataContributorRoleAssignment './role-assignments-storage-blob-data-contributor.bicep' = { - name: '${microsoftSqlServerName}-microsoft-sql-server-blob-contributer' - params: { - storageAccountName: diagnosticStorageAccountName - principalId: contributorPrincipalId - } -} - -resource microsoftSqlServerOutboundFirewallRules 'Microsoft.Sql/servers/outboundFirewallRules@2023-08-01' = { - parent: existingMicrosoftSqlServer - name: replace(replace(dianosticStorageAccountBlobEndpoint, 'https:', ''), '/', '') - dependsOn: [diagnosticStorageBlobDataContributorRoleAssignment] -} - -resource microsoftSqlServerAuditingSettings 'Microsoft.Sql/servers/auditingSettings@2023-08-01' = { - parent: existingMicrosoftSqlServer - name: 'default' - properties: { - retentionDays: 90 - auditActionsAndGroups: [ - 'SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP' - 'FAILED_DATABASE_AUTHENTICATION_GROUP' - 'BATCH_COMPLETED_GROUP' - ] - isAzureMonitorTargetEnabled: true - isManagedIdentityInUse: true - state: 'Enabled' - storageEndpoint: dianosticStorageAccountBlobEndpoint - storageAccountSubscriptionId: dianosticStorageAccountSubscriptionId - } - dependsOn: [microsoftSqlServerOutboundFirewallRules] -} - -resource microsoftSqlServerVulnerabilityAssessment 'Microsoft.Sql/servers/vulnerabilityAssessments@2023-08-01' = { - name: 'default' - parent: existingMicrosoftSqlServer - properties: { - recurringScans: { - emails: [''] - 
emailSubscriptionAdmins: true - isEnabled: true - } - storageContainerPath: '${dianosticStorageAccountBlobEndpoint}sql-vulnerability-scans/' - } - dependsOn: [microsoftSqlServerOutboundFirewallRules] -} diff --git a/cloud-infrastructure/modules/microsoft-sql-server.bicep b/cloud-infrastructure/modules/microsoft-sql-server.bicep deleted file mode 100644 index 27db6c328a..0000000000 --- a/cloud-infrastructure/modules/microsoft-sql-server.bicep +++ /dev/null @@ -1,52 +0,0 @@ -param name string -param location string -param tags object -param subnetId string -param tenantId string -param sqlAdminObjectId string - -resource microsoftSqlServer 'Microsoft.Sql/servers@2023-08-01' = { - name: name - location: location - tags: tags - identity: { - type: 'SystemAssigned' - } - properties: { - minimalTlsVersion: '1.2' - publicNetworkAccess: 'Enabled' - administrators: { - administratorType: 'ActiveDirectory' - principalType: 'Group' - login: 'Azure SQL Server Admins' - sid: sqlAdminObjectId - tenantId: tenantId - azureADOnlyAuthentication: true - } - restrictOutboundNetworkAccess: 'Enabled' - } -} - -resource sqlServerVirtualNetworkRule 'Microsoft.Sql/servers/virtualNetworkRules@2023-08-01' = { - name: 'sql-server-virtual-network-rule' - parent: microsoftSqlServer - properties: { - ignoreMissingVnetServiceEndpoint: true - virtualNetworkSubnetId: subnetId - } -} - -resource microsoftSqlServerSecurityAlertPolicies 'Microsoft.Sql/servers/securityAlertPolicies@2023-08-01' = { - parent: microsoftSqlServer - name: 'Default' - properties: { - state: 'Enabled' - disabledAlerts: [''] - emailAddresses: [''] - emailAccountAdmins: false - retentionDays: 0 - } -} - -output sqlServerId string = microsoftSqlServer.id -output principalId string = microsoftSqlServer.identity.principalId diff --git a/cloud-infrastructure/modules/postgresql-flexible-database.bicep b/cloud-infrastructure/modules/postgresql-flexible-database.bicep new file mode 100644 index 0000000000..c0ebb550de --- /dev/null +++ 
b/cloud-infrastructure/modules/postgresql-flexible-database.bicep @@ -0,0 +1,12 @@ +param serverName string +param databaseName string + +resource postgresDatabase 'Microsoft.DBforPostgreSQL/flexibleServers/databases@2025-08-01' = { + name: '${serverName}/${databaseName}' + properties: { + charset: 'UTF8' + collation: 'en_US.utf8' + } +} + +output connectionString string = 'Host=${serverName}.postgres.database.azure.com;Database=${databaseName};Ssl Mode=VerifyFull' diff --git a/cloud-infrastructure/modules/postgresql-flexible-server.bicep b/cloud-infrastructure/modules/postgresql-flexible-server.bicep new file mode 100644 index 0000000000..9c7d4df81a --- /dev/null +++ b/cloud-infrastructure/modules/postgresql-flexible-server.bicep @@ -0,0 +1,148 @@ +param name string +param location string +param tags object +param tenantId string +param subnetId string +param virtualNetworkId string +param isProduction bool +param diagnosticStorageAccountId string + +resource postgresServer 'Microsoft.DBforPostgreSQL/flexibleServers@2025-08-01' = { + name: name + location: location + tags: tags + sku: { + name: isProduction ? 'Standard_D2ds_v5' : 'Standard_B1ms' + tier: isProduction ? 'GeneralPurpose' : 'Burstable' + } + properties: { + version: '17' + createMode: 'Default' + authConfig: { + activeDirectoryAuth: 'Enabled' + passwordAuth: 'Disabled' + tenantId: tenantId + } + storage: { + storageSizeGB: isProduction ? 32 : 32 // Change to 64, 128, etc. if more space is needed + } + backup: { + backupRetentionDays: isProduction ? 35 : 7 + geoRedundantBackup: isProduction ? 'Enabled' : 'Disabled' + } + highAvailability: { + // Zone-redundant HA provides automatic failover (<120s) with zero data loss and 99.99% SLA, + // but doubles the PostgreSQL cost. Requires General Purpose SKU (already used for production). + mode: isProduction ? 'Disabled' : 'Disabled' + } + network: { + // Public access is enabled because GitHub-hosted Actions runners cannot reach VNet resources. 
No permanent + // firewall rules exist -- runner IPs are added temporarily during CI/CD and removed immediately after. + // Runtime traffic from Container Apps flows exclusively through the private endpoint. + publicNetworkAccess: 'Enabled' + } + } +} + +resource privateDnsZone 'Microsoft.Network/privateDnsZones@2024-06-01' = { + name: 'privatelink.postgres.database.azure.com' + location: 'global' + tags: tags +} + +resource privateDnsZoneVnetLink 'Microsoft.Network/privateDnsZones/virtualNetworkLinks@2024-06-01' = { + parent: privateDnsZone + name: '${name}-vnet-link' + location: 'global' + properties: { + virtualNetwork: { + id: virtualNetworkId + } + registrationEnabled: false + } +} + +resource privateEndpoint 'Microsoft.Network/privateEndpoints@2025-01-01' = { + name: '${name}-postgres' + location: location + tags: tags + properties: { + customNetworkInterfaceName: '${name}-postgres' + subnet: { + id: subnetId + } + privateLinkServiceConnections: [ + { + name: '${name}-postgres-connection' + properties: { + privateLinkServiceId: postgresServer.id + groupIds: ['postgresqlServer'] + } + } + ] + } +} + +resource privateDnsZoneGroup 'Microsoft.Network/privateEndpoints/privateDnsZoneGroups@2025-01-01' = { + parent: privateEndpoint + name: 'default' + dependsOn: [privateDnsZoneVnetLink] + properties: { + privateDnsZoneConfigs: [ + { + name: 'postgres' + properties: { + privateDnsZoneId: privateDnsZone.id + } + } + ] + } +} + +resource extensionsConfig 'Microsoft.DBforPostgreSQL/flexibleServers/configurations@2025-08-01' = { + parent: postgresServer + name: 'azure.extensions' + dependsOn: [privateDnsZoneGroup] + properties: { + value: 'pg_stat_statements' + source: 'user-override' + } +} + +resource logStatementConfig 'Microsoft.DBforPostgreSQL/flexibleServers/configurations@2025-08-01' = { + parent: postgresServer + name: 'log_statement' + dependsOn: [extensionsConfig] + properties: { + value: 'mod' + source: 'user-override' + } +} + +resource walLevelConfig 
'Microsoft.DBforPostgreSQL/flexibleServers/configurations@2025-08-01' = { + parent: postgresServer + name: 'wal_level' + dependsOn: [logStatementConfig] + properties: { + value: 'logical' + source: 'user-override' + } +} + +resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { + name: '${name}-postgres-diagnostics' + scope: postgresServer + dependsOn: [walLevelConfig] + properties: { + storageAccountId: diagnosticStorageAccountId + logs: [ + { + categoryGroup: 'allLogs' + enabled: true + } + ] + } +} + +output serverName string = postgresServer.name +output serverFqdn string = postgresServer.properties.fullyQualifiedDomainName diff --git a/cloud-infrastructure/modules/storage-account-retention.bicep b/cloud-infrastructure/modules/storage-account-retention.bicep new file mode 100644 index 0000000000..46c5dc4e23 --- /dev/null +++ b/cloud-infrastructure/modules/storage-account-retention.bicep @@ -0,0 +1,34 @@ +param storageAccountName string +param retentionDays int + +resource storageAccount 'Microsoft.Storage/storageAccounts@2025-06-01' existing = { + name: storageAccountName +} + +resource lifecyclePolicy 'Microsoft.Storage/storageAccounts/managementPolicies@2025-06-01' = { + parent: storageAccount + name: 'default' + properties: { + policy: { + rules: [ + { + name: 'delete-after-${retentionDays}-days' + enabled: true + type: 'Lifecycle' + definition: { + actions: { + baseBlob: { + delete: { + daysAfterModificationGreaterThan: retentionDays + } + } + } + filters: { + blobTypes: ['blockBlob', 'appendBlob'] + } + } + } + ] + } + } +} diff --git a/cloud-infrastructure/modules/storage-account.bicep b/cloud-infrastructure/modules/storage-account.bicep index 06a08f3f2e..b640dcfda2 100644 --- a/cloud-infrastructure/modules/storage-account.bicep +++ b/cloud-infrastructure/modules/storage-account.bicep @@ -65,5 +65,6 @@ module storageBlobDataContributorRoleAssignment 'role-assignments-storage-blob-d dependsOn: [storageAccount] } +output 
name string = storageAccount.name output blobEndpoint string = storageAccount.properties.primaryEndpoints.blob output storageAccountId string = storageAccount.id diff --git a/cloud-infrastructure/modules/user-assigned-managed-identity.bicep b/cloud-infrastructure/modules/user-assigned-managed-identity.bicep index 116979fce4..56a85b26d5 100644 --- a/cloud-infrastructure/modules/user-assigned-managed-identity.bicep +++ b/cloud-infrastructure/modules/user-assigned-managed-identity.bicep @@ -67,6 +67,7 @@ resource signKeyVaultKeysRoleAssignment 'Microsoft.Authorization/roleAssignments } } +output name string = userAssignedIdentity.name output id string = userAssignedIdentity.id output clientId string = userAssignedIdentity.properties.clientId output principalId string = userAssignedIdentity.properties.principalId diff --git a/cloud-infrastructure/modules/virtual-network.bicep b/cloud-infrastructure/modules/virtual-network.bicep index 13740def21..75f08dc3c6 100644 --- a/cloud-infrastructure/modules/virtual-network.bicep +++ b/cloud-infrastructure/modules/virtual-network.bicep @@ -3,6 +3,9 @@ param location string param tags object param address string +var addressPrefix = split(address, '.')[0] +var privateEndpointSubnet = '${addressPrefix}.0.2.0/24' + resource virtualNetwork 'Microsoft.Network/virtualNetworks@2025-01-01' = { name: name location: location @@ -18,18 +21,28 @@ resource virtualNetwork 'Microsoft.Network/virtualNetworks@2025-01-01' = { } subnets: [ { - name: 'subnet' + name: 'container-apps' properties: { addressPrefix: '${address}/23' serviceEndpoints: [ { service: 'Microsoft.KeyVault' } + ] + delegations: [ { - service: 'Microsoft.Sql' + name: 'Microsoft.App.environments' + properties: { + serviceName: 'Microsoft.App/environments' + } } ] - delegations: [] + } + } + { + name: 'private-endpoints' + properties: { + addressPrefix: privateEndpointSubnet privateEndpointNetworkPolicies: 'Enabled' privateLinkServiceNetworkPolicies: 'Enabled' } @@ -40,4 +53,5 @@ 
resource virtualNetwork 'Microsoft.Network/virtualNetworks@2025-01-01' = { output virtualNetworkName string = virtualNetwork.name output virtualNetworkId string = virtualNetwork.id -output subnetId string = virtualNetwork.properties.subnets[0].id +output containerAppsSubnetId string = virtualNetwork.properties.subnets[0].id +output privateEndpointSubnetId string = virtualNetwork.properties.subnets[1].id diff --git a/developer-cli/Commands/DeployCommand.cs b/developer-cli/Commands/DeployCommand.cs index 21609aa1a4..f735a85b2d 100644 --- a/developer-cli/Commands/DeployCommand.cs +++ b/developer-cli/Commands/DeployCommand.cs @@ -62,7 +62,7 @@ private void Execute() ConfirmReuseIfAppRegistrationsExist(); - ConfirmReuseIfSqlAdminSecurityGroupExists(); + ConfirmReuseIfPostgresAdminSecurityGroupExists(); CollectAdditionalInfo(); @@ -82,7 +82,7 @@ private void Execute() GrantSubscriptionPermissionsToServicePrincipals(); - CreateAzureSqlServerSecurityGroups(); + CreateAzurePostgresAdminSecurityGroups(); CreateGithubEnvironments(); @@ -298,7 +298,7 @@ private void CollectUniquePrefix() var uniquePrefix = Config.GithubVariables.GetValueOrDefault(nameof(VariableNames.UNIQUE_PREFIX)); AnsiConsole.MarkupLine( - "When creating Azure resources like Azure Container Registry, SQL Server, Blob storage, Service Bus, Key Vaults, etc., a global unique name is required. To do this we use a prefix of 2-6 characters, which allows for flexibility for the rest of the name. E.g. if you select 'acme' the production SQL Server in West Europe will be named 'acme-prod-euw'." + "When creating Azure resources like Azure Container Registry, PostgreSQL, Blob storage, Service Bus, Key Vaults, etc., a global unique name is required. To do this we use a prefix of 2-6 characters, which allows for flexibility for the rest of the name. E.g. if you select 'acme' the production PostgreSQL server in West Europe will be named 'acme-prod-euw'." 
); if (uniquePrefix is not null) @@ -403,24 +403,24 @@ void ConfirmReuseIfAppRegistrationExist(AppRegistration appRegistration) } } - private void ConfirmReuseIfSqlAdminSecurityGroupExists() + private void ConfirmReuseIfPostgresAdminSecurityGroupExists() { - Config.StagingSubscription.SqlAdminsGroup.ObjectId = ConfirmReuseIfSqlAdminSecurityGroupExist(Config.StagingSubscription.SqlAdminsGroup.Name); - Config.ProductionSubscription.SqlAdminsGroup.ObjectId = ConfirmReuseIfSqlAdminSecurityGroupExist(Config.ProductionSubscription.SqlAdminsGroup.Name); + Config.StagingSubscription.PostgresAdminsGroup.ObjectId = ConfirmReuseIfPostgresAdminSecurityGroupExist(Config.StagingSubscription.PostgresAdminsGroup.Name); + Config.ProductionSubscription.PostgresAdminsGroup.ObjectId = ConfirmReuseIfPostgresAdminSecurityGroupExist(Config.ProductionSubscription.PostgresAdminsGroup.Name); - string? ConfirmReuseIfSqlAdminSecurityGroupExist(string sqlAdminsSecurityGroupName) + string? ConfirmReuseIfPostgresAdminSecurityGroupExist(string dbAdminsSecurityGroupName) { - var sqlAdminsObjectId = RunAzureCliCommand( - $"""ad group list --display-name "{sqlAdminsSecurityGroupName}" --query "[].id" -o tsv""" + var dbAdminsObjectId = RunAzureCliCommand( + $"""ad group list --display-name "{dbAdminsSecurityGroupName}" --query "[].id" -o tsv""" ).Trim(); - if (sqlAdminsObjectId == string.Empty) + if (dbAdminsObjectId == string.Empty) { return null; } AnsiConsole.MarkupLine( - $"[yellow]The AD Security Group '{sqlAdminsSecurityGroupName}' already exists with ID: {sqlAdminsObjectId}[/]" + $"[yellow]The AD Security Group '{dbAdminsSecurityGroupName}' already exists with ID: {dbAdminsObjectId}[/]" ); if (!AnsiConsole.Confirm("The existing AD Security Group will be reused. 
Do you want to continue?")) @@ -431,7 +431,7 @@ private void ConfirmReuseIfSqlAdminSecurityGroupExists() AnsiConsole.WriteLine(); - return sqlAdminsObjectId; + return dbAdminsObjectId; } } @@ -452,11 +452,11 @@ private void ConfirmChangesPrompt() var productionServicePrincipal = Config.ProductionSubscription.AppRegistration.Exists ? Config.ProductionSubscription.AppRegistration.ServicePrincipalId : "Will be generated"; - var stagingSqlAdminObject = Config.StagingSubscription.SqlAdminsGroup.Exists - ? Config.StagingSubscription.SqlAdminsGroup.ObjectId + var stagingPostgresAdminObject = Config.StagingSubscription.PostgresAdminsGroup.Exists + ? Config.StagingSubscription.PostgresAdminsGroup.ObjectId : "Will be generated"; - var productionSqlAdminObject = Config.ProductionSubscription.SqlAdminsGroup.Exists - ? Config.ProductionSubscription.SqlAdminsGroup.ObjectId + var productionPostgresAdminObject = Config.ProductionSubscription.PostgresAdminsGroup.Exists + ? Config.ProductionSubscription.PostgresAdminsGroup.ObjectId : "Will be generated"; var setupConfirmPrompt = @@ -472,10 +472,10 @@ [bold]Please review planned changes before continuing.[/] [yellow]** The Service Principals will get 'Contributor' and 'User Access Administrator' role on the Azure Subscriptions.[/] [bold]Active Directory Security Groups:[/] - * [blue]{Config.StagingSubscription.SqlAdminsGroup.Name}[/] - * [blue]{Config.ProductionSubscription.SqlAdminsGroup.Name}[/] + * [blue]{Config.StagingSubscription.PostgresAdminsGroup.Name}[/] + * [blue]{Config.ProductionSubscription.PostgresAdminsGroup.Name}[/] - [yellow]** The SQL Admins Security Groups are used to grant Managed Identities and CI/CD permissions to SQL Databases.[/] + [yellow]** The PostgreSQL Admins Security Groups are used to grant Managed Identities and CI/CD permissions to PostgreSQL databases.[/] 2. 
The following GitHub environments will be created if not exists: * [blue]staging[/] @@ -493,7 +493,7 @@ [bold]Please review planned changes before continuing.[/] * STAGING_SUBSCRIPTION_ID: [blue]{Config.StagingSubscription.Id}[/] * STAGING_SHARED_LOCATION: [blue]{Config.StagingLocation.SharedLocation}[/] * STAGING_SERVICE_PRINCIPAL_ID: [blue]{stagingServicePrincipal}[/] - * STAGING_SQL_ADMIN_OBJECT_ID: [blue]{stagingSqlAdminObject}[/] + * STAGING_POSTGRES_ADMIN_OBJECT_ID: [blue]{stagingPostgresAdminObject}[/] * STAGING_DOMAIN_NAME: [blue]-[/] ([yellow]Manually changed this and triggered deployment to set up the domain[/]) [bold]Staging Cluster Variables:[/] @@ -506,7 +506,7 @@ [bold]Please review planned changes before continuing.[/] * PRODUCTION_SHARED_LOCATION: [blue]{Config.ProductionLocation.SharedLocation}[/] * PRODUCTION_SERVICE_PRINCIPAL_ID: [blue]{productionServicePrincipal}[/] * PRODUCTION_SERVICE_PRINCIPAL_OBJECT_ID: [blue]{Config.ProductionSubscription.AppRegistration.ServicePrincipalObjectId}[/] - * PRODUCTION_SQL_ADMIN_OBJECT_ID: [blue]{productionSqlAdminObject}[/] + * PRODUCTION_POSTGRES_ADMIN_OBJECT_ID: [blue]{productionPostgresAdminObject}[/] * PRODUCTION_DOMAIN_NAME: [blue]-[/] ([yellow]Manually changed this and triggered deployment to set up the domain[/]) [bold]Production Cluster 1 Variables:[/] @@ -662,27 +662,27 @@ void GrantAccess(Subscription subscription, string appRegistrationName) } } - private void CreateAzureSqlServerSecurityGroups() + private void CreateAzurePostgresAdminSecurityGroups() { - CreateAzureSqlServerSecurityGroup(Config.StagingSubscription.SqlAdminsGroup, Config.StagingSubscription.AppRegistration); - CreateAzureSqlServerSecurityGroup(Config.ProductionSubscription.SqlAdminsGroup, Config.ProductionSubscription.AppRegistration); + CreateAzurePostgresAdminSecurityGroup(Config.StagingSubscription.PostgresAdminsGroup, Config.StagingSubscription.AppRegistration); + 
CreateAzurePostgresAdminSecurityGroup(Config.ProductionSubscription.PostgresAdminsGroup, Config.ProductionSubscription.AppRegistration); - void CreateAzureSqlServerSecurityGroup(SqlAdminsGroup sqlAdminGroup, AppRegistration appRegistration) + void CreateAzurePostgresAdminSecurityGroup(PostgresAdminsGroup dbAdminGroup, AppRegistration appRegistration) { - if (!sqlAdminGroup.Exists) + if (!dbAdminGroup.Exists) { - sqlAdminGroup.ObjectId = RunAzureCliCommand( - $"""ad group create --display-name "{sqlAdminGroup.Name}" --mail-nickname "{sqlAdminGroup.NickName}" --query "id" -o tsv""" + dbAdminGroup.ObjectId = RunAzureCliCommand( + $"""ad group create --display-name "{dbAdminGroup.Name}" --mail-nickname "{dbAdminGroup.NickName}" --query "id" -o tsv""" ).Trim(); } RunAzureCliCommand( - $"ad group member add --group {sqlAdminGroup.ObjectId} --member-id {appRegistration.ServicePrincipalObjectId}", + $"ad group member add --group {dbAdminGroup.ObjectId} --member-id {appRegistration.ServicePrincipalObjectId}", !Configuration.TraceEnabled ); AnsiConsole.MarkupLine( - $"[green]Successfully created AD Security Group '{sqlAdminGroup.Name}' and assigned the App Registration '{appRegistration.Name}' owner role.[/]" + $"[green]Successfully created AD Security Group '{dbAdminGroup.Name}' and assigned the App Registration '{appRegistration.Name}' owner role.[/]" ); } } @@ -723,7 +723,7 @@ private static void CreateGithubSecretsAndVariables() SetGithubVariable(VariableNames.STAGING_SUBSCRIPTION_ID, Config.StagingSubscription.Id); SetGithubVariable(VariableNames.STAGING_SERVICE_PRINCIPAL_ID, Config.StagingSubscription.AppRegistration.ServicePrincipalId!); SetGithubVariable(VariableNames.STAGING_SHARED_LOCATION, Config.StagingLocation.SharedLocation); - SetGithubVariable(VariableNames.STAGING_SQL_ADMIN_OBJECT_ID, Config.StagingSubscription.SqlAdminsGroup.ObjectId!); + SetGithubVariable(VariableNames.STAGING_POSTGRES_ADMIN_OBJECT_ID, 
Config.StagingSubscription.PostgresAdminsGroup.ObjectId!); SetGithubVariable(VariableNames.STAGING_DOMAIN_NAME, "-"); SetGithubVariable(VariableNames.STAGING_CLUSTER_ENABLED, "true"); @@ -734,7 +734,7 @@ private static void CreateGithubSecretsAndVariables() SetGithubVariable(VariableNames.PRODUCTION_SERVICE_PRINCIPAL_ID, Config.ProductionSubscription.AppRegistration.ServicePrincipalId!); SetGithubVariable(VariableNames.PRODUCTION_SERVICE_PRINCIPAL_OBJECT_ID, Config.ProductionSubscription.AppRegistration.ServicePrincipalObjectId!); SetGithubVariable(VariableNames.PRODUCTION_SHARED_LOCATION, Config.ProductionLocation.SharedLocation); - SetGithubVariable(VariableNames.PRODUCTION_SQL_ADMIN_OBJECT_ID, Config.ProductionSubscription.SqlAdminsGroup.ObjectId!); + SetGithubVariable(VariableNames.PRODUCTION_POSTGRES_ADMIN_OBJECT_ID, Config.ProductionSubscription.PostgresAdminsGroup.ObjectId!); SetGithubVariable(VariableNames.PRODUCTION_DOMAIN_NAME, "-"); SetGithubVariable(VariableNames.PRODUCTION_CLUSTER1_ENABLED, "false"); @@ -1028,7 +1028,7 @@ public class Subscription(string id, string name, string tenantId, GithubInfo gi public AppRegistration AppRegistration { get; } = new(githubInfo, environmentName); - public SqlAdminsGroup SqlAdminsGroup { get; } = new(githubInfo, environmentName); + public PostgresAdminsGroup PostgresAdminsGroup { get; } = new(githubInfo, environmentName); } public class AppRegistration(GithubInfo githubInfo, string environmentName) @@ -1044,11 +1044,11 @@ public class AppRegistration(GithubInfo githubInfo, string environmentName) public string? 
ServicePrincipalObjectId { get; set; } } -public class SqlAdminsGroup(GithubInfo githubInfo, string environmentName) +public class PostgresAdminsGroup(GithubInfo githubInfo, string environmentName) { - public string Name => $"SQL Admins - {environmentName} - {githubInfo.OrganizationName}/{githubInfo.RepositoryName}"; + public string Name => $"PostgreSQL Admins - {environmentName} - {githubInfo.OrganizationName}/{githubInfo.RepositoryName}"; - public string NickName => $"SQLServerAdmins{environmentName}{githubInfo.OrganizationName}{githubInfo.RepositoryName}"; + public string NickName => $"PostgreSQLAdmins{environmentName}{githubInfo.OrganizationName}{githubInfo.RepositoryName}"; public bool Exists => !string.IsNullOrEmpty(ObjectId); @@ -1066,7 +1066,7 @@ public enum VariableNames STAGING_SUBSCRIPTION_ID, STAGING_SERVICE_PRINCIPAL_ID, STAGING_SHARED_LOCATION, - STAGING_SQL_ADMIN_OBJECT_ID, + STAGING_POSTGRES_ADMIN_OBJECT_ID, STAGING_DOMAIN_NAME, STAGING_CLUSTER_ENABLED, @@ -1077,7 +1077,7 @@ public enum VariableNames PRODUCTION_SERVICE_PRINCIPAL_ID, PRODUCTION_SERVICE_PRINCIPAL_OBJECT_ID, PRODUCTION_SHARED_LOCATION, - PRODUCTION_SQL_ADMIN_OBJECT_ID, + PRODUCTION_POSTGRES_ADMIN_OBJECT_ID, PRODUCTION_DOMAIN_NAME, PRODUCTION_CLUSTER1_ENABLED,