+ |
+ toggleSelect(bead.bead_id)}
+ aria-label={`Select bead ${bead.bead_id}`}
+ />
+ |
setConfirmAction(null)}>
-
- {confirmAction?.type === 'close' ? 'Force Close Bead' : 'Force Fail Bead'}
-
-
- This will {confirmAction?.type === 'close' ? 'force-close' : 'force-fail'} bead{' '}
- {confirmAction?.beadId.slice(0, 8)}…
- {confirmAction?.title ? ` (${confirmAction.title})` : ''}. This action is logged in
- the audit trail.
-
+ {confirmDialogTitle()}
+ {confirmDialogDescription()}
diff --git a/apps/web/src/lib/gastown/types/router.d.ts b/apps/web/src/lib/gastown/types/router.d.ts
index c5e2d76067..da9eff6d5f 100644
--- a/apps/web/src/lib/gastown/types/router.d.ts
+++ b/apps/web/src/lib/gastown/types/router.d.ts
@@ -138,6 +138,7 @@ export declare const gastownRouter: import('@trpc/server').TRPCBuiltRouter<
review_mode?: 'rework' | 'comments' | undefined;
code_review?: boolean | undefined;
auto_resolve_pr_feedback?: boolean | undefined;
+ auto_resolve_merge_conflicts?: boolean | undefined;
auto_merge_delay_minutes?: number | null | undefined;
merge_strategy?: 'direct' | 'pr' | undefined;
convoy_merge_mode?: 'review-then-land' | 'review-and-merge' | undefined;
@@ -209,6 +210,7 @@ export declare const gastownRouter: import('@trpc/server').TRPCBuiltRouter<
review_mode?: 'rework' | 'comments' | undefined;
code_review?: boolean | undefined;
auto_resolve_pr_feedback?: boolean | undefined;
+ auto_resolve_merge_conflicts?: boolean | undefined;
auto_merge_delay_minutes?: number | null | undefined;
merge_strategy?: 'direct' | 'pr' | undefined;
convoy_merge_mode?: 'review-then-land' | 'review-and-merge' | undefined;
@@ -555,6 +557,7 @@ export declare const gastownRouter: import('@trpc/server').TRPCBuiltRouter<
code_review: boolean;
review_mode: 'comments' | 'rework';
auto_resolve_pr_feedback: boolean;
+ auto_resolve_merge_conflicts: boolean;
auto_merge_delay_minutes: number | null;
}
| undefined;
@@ -619,6 +622,7 @@ export declare const gastownRouter: import('@trpc/server').TRPCBuiltRouter<
code_review?: boolean | undefined;
review_mode?: 'comments' | 'rework' | undefined;
auto_resolve_pr_feedback?: boolean | undefined;
+ auto_resolve_merge_conflicts?: boolean | undefined;
auto_merge_delay_minutes?: number | null | undefined;
}
| undefined;
@@ -677,6 +681,7 @@ export declare const gastownRouter: import('@trpc/server').TRPCBuiltRouter<
code_review: boolean;
review_mode: 'comments' | 'rework';
auto_resolve_pr_feedback: boolean;
+ auto_resolve_merge_conflicts: boolean;
auto_merge_delay_minutes: number | null;
}
| undefined;
@@ -1537,6 +1542,7 @@ export declare const wrappedGastownRouter: import('@trpc/server').TRPCBuiltRoute
review_mode?: 'rework' | 'comments' | undefined;
code_review?: boolean | undefined;
auto_resolve_pr_feedback?: boolean | undefined;
+ auto_resolve_merge_conflicts?: boolean | undefined;
auto_merge_delay_minutes?: number | null | undefined;
merge_strategy?: 'direct' | 'pr' | undefined;
convoy_merge_mode?: 'review-then-land' | 'review-and-merge' | undefined;
@@ -1608,6 +1614,7 @@ export declare const wrappedGastownRouter: import('@trpc/server').TRPCBuiltRoute
review_mode?: 'rework' | 'comments' | undefined;
code_review?: boolean | undefined;
auto_resolve_pr_feedback?: boolean | undefined;
+ auto_resolve_merge_conflicts?: boolean | undefined;
auto_merge_delay_minutes?: number | null | undefined;
merge_strategy?: 'direct' | 'pr' | undefined;
convoy_merge_mode?: 'review-then-land' | 'review-and-merge' | undefined;
@@ -1954,6 +1961,7 @@ export declare const wrappedGastownRouter: import('@trpc/server').TRPCBuiltRoute
code_review: boolean;
review_mode: 'comments' | 'rework';
auto_resolve_pr_feedback: boolean;
+ auto_resolve_merge_conflicts: boolean;
auto_merge_delay_minutes: number | null;
}
| undefined;
@@ -2018,6 +2026,7 @@ export declare const wrappedGastownRouter: import('@trpc/server').TRPCBuiltRoute
code_review?: boolean | undefined;
review_mode?: 'comments' | 'rework' | undefined;
auto_resolve_pr_feedback?: boolean | undefined;
+ auto_resolve_merge_conflicts?: boolean | undefined;
auto_merge_delay_minutes?: number | null | undefined;
}
| undefined;
@@ -2076,6 +2085,7 @@ export declare const wrappedGastownRouter: import('@trpc/server').TRPCBuiltRoute
code_review: boolean;
review_mode: 'comments' | 'rework';
auto_resolve_pr_feedback: boolean;
+ auto_resolve_merge_conflicts: boolean;
auto_merge_delay_minutes: number | null;
}
| undefined;
diff --git a/apps/web/src/routers/admin/gastown-router.ts b/apps/web/src/routers/admin/gastown-router.ts
index 43dc72def8..eae3aa209f 100644
--- a/apps/web/src/routers/admin/gastown-router.ts
+++ b/apps/web/src/routers/admin/gastown-router.ts
@@ -755,6 +755,40 @@ export const adminGastownRouter = createTRPCRouter({
);
}),
+ bulkDeleteBeads: adminProcedure
+ .input(z.object({ townId: z.string().uuid(), beadIds: z.array(z.string().uuid()) }))
+ .output(z.object({ deleted: z.number() }))
+ .mutation(async ({ input, ctx }) => {
+ const result = await gastownTrpcMutate(
+ ctx.user,
+ 'gastown.adminBulkDeleteBeads',
+ { townId: input.townId, beadIds: input.beadIds },
+ z.object({ deleted: z.number() })
+ );
+ return result ?? { deleted: 0 };
+ }),
+
+ deleteBeadsByStatus: adminProcedure
+ .input(
+ z.object({
+ townId: z.string().uuid(),
+ status: z.enum(['open', 'in_progress', 'in_review', 'closed', 'failed']),
+ type: z
+ .enum(['issue', 'message', 'escalation', 'merge_request', 'convoy', 'molecule', 'agent'])
+ .optional(),
+ })
+ )
+ .output(z.object({ deleted: z.number() }))
+ .mutation(async ({ input, ctx }) => {
+ const result = await gastownTrpcMutate(
+ ctx.user,
+ 'gastown.adminDeleteBeadsByStatus',
+ { townId: input.townId, status: input.status, type: input.type },
+ z.object({ deleted: z.number() })
+ );
+ return result ?? { deleted: 0 };
+ }),
+
/** Force-retry a stalled review queue entry. Not yet implemented on the worker. */
forceRetryReview: adminProcedure
.input(z.object({ townId: z.string().uuid(), entryId: z.string().uuid() }))
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 76a01012fa..ad3be75408 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1545,11 +1545,11 @@ importers:
services/gastown/container:
dependencies:
'@kilocode/plugin':
- specifier: 7.1.23
- version: 7.1.23
+ specifier: 7.2.14
+ version: 7.2.14
'@kilocode/sdk':
- specifier: 7.1.23
- version: 7.1.23
+ specifier: 7.2.14
+ version: 7.2.14
hono:
specifier: ^4.12.7
version: 4.12.8
@@ -4129,12 +4129,23 @@ packages:
'@jridgewell/trace-mapping@0.3.9':
resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==}
- '@kilocode/plugin@7.1.23':
- resolution: {integrity: sha512-sFI0rlgQg3mzYP05/gg09IE1mN4QyAXyOlJjM36CFoVKi1bPYz/dqEqr6ddmMNmB577A4gDu0D07hr0pGIFrBA==}
+ '@kilocode/plugin@7.2.14':
+ resolution: {integrity: sha512-mS+WA9HZIBH2qQ9ARA+v0q4MdQTSdfOvKbe4AOSkjP+P5hVA70OM/UVM9DVcvmjSOxU+wuUxmOy+j/EQIrgFmw==}
+ peerDependencies:
+ '@opentui/core': '>=0.1.97'
+ '@opentui/solid': '>=0.1.97'
+ peerDependenciesMeta:
+ '@opentui/core':
+ optional: true
+ '@opentui/solid':
+ optional: true
'@kilocode/sdk@7.1.23':
resolution: {integrity: sha512-moSKXqpwE+ozVbNE1aYIUb5Kd7fesOicRUn90WiMlp+8lRhqQc6ZTTIaIB9ZzD7Dak//4bSuo++bb+Jtw3U4Fg==}
+ '@kilocode/sdk@7.2.14':
+ resolution: {integrity: sha512-Naz83lFrsbavuDp6UwxRuglOaSNvRBsZfcRNvb7RpWYAwbuJP0dBdhpXj6uO3ta5qxeQ2JzxKNC9Ffz+LCLLDg==}
+
'@lottiefiles/dotlottie-react@0.17.15':
resolution: {integrity: sha512-4wYAjsJhM28eUvJ/gT3KRM6fcyT7EM9n7PDrP71LaBTacc6bSN43qFTSJc1Li3QxUiraz23p0Q8EJBzXo8DsRw==}
peerDependencies:
@@ -17656,13 +17667,17 @@ snapshots:
'@jridgewell/resolve-uri': 3.1.2
'@jridgewell/sourcemap-codec': 1.5.5
- '@kilocode/plugin@7.1.23':
+ '@kilocode/plugin@7.2.14':
dependencies:
- '@kilocode/sdk': 7.1.23
+ '@kilocode/sdk': 7.2.14
zod: 4.1.8
'@kilocode/sdk@7.1.23': {}
+ '@kilocode/sdk@7.2.14':
+ dependencies:
+ cross-spawn: 7.0.6
+
'@lottiefiles/dotlottie-react@0.17.15(react@19.2.4)':
dependencies:
'@lottiefiles/dotlottie-web': 0.63.0
diff --git a/services/gastown/container/Dockerfile b/services/gastown/container/Dockerfile
index b835a33329..a0db8ddc00 100644
--- a/services/gastown/container/Dockerfile
+++ b/services/gastown/container/Dockerfile
@@ -4,7 +4,7 @@ FROM oven/bun:1-slim
# Package categories:
# version control: git, git-lfs
# network/download: curl, wget, ca-certificates, gnupg, unzip
-# build toolchain: build-essential, autoconf
+# build toolchain: build-essential, autoconf, cmake, pkg-config
# search tools: ripgrep, jq
# compression: bzip2, zstd
# SSL/crypto: libssl-dev, libffi-dev
@@ -16,6 +16,7 @@ FROM oven/bun:1-slim
# C++ stdlib: libc++1
# math: libgmp-dev
# timezone data: tzdata
+# Java: default-jdk
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git \
@@ -27,6 +28,8 @@ RUN apt-get update && \
unzip \
build-essential \
autoconf \
+ cmake \
+ pkg-config \
ripgrep \
jq \
bzip2 \
@@ -47,6 +50,7 @@ RUN apt-get update && \
libc++1 \
libgmp-dev \
tzdata \
+ default-jdk \
&& curl -fsSL https://deb.nodesource.com/setup_24.x | bash - \
&& apt-get install -y --no-install-recommends nodejs \
&& curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
@@ -68,7 +72,7 @@ RUN git lfs install --system
# Install both glibc and musl variants — the CLI's binary resolver may
# pick either depending on the detected libc.
# Also install pnpm — many projects use it as their package manager.
-RUN npm install -g @kilocode/cli @kilocode/cli-linux-x64 @kilocode/cli-linux-x64-musl @kilocode/plugin pnpm && \
+RUN npm install -g @kilocode/cli@7.2.14 @kilocode/cli-linux-x64@7.2.14 @kilocode/cli-linux-x64-musl@7.2.14 @kilocode/plugin@7.2.14 pnpm && \
ln -s "$(which kilo)" /usr/local/bin/opencode
# Create non-root user for defense-in-depth
diff --git a/services/gastown/container/Dockerfile.dev b/services/gastown/container/Dockerfile.dev
index a4bebc5dbf..0b5ecf53ff 100644
--- a/services/gastown/container/Dockerfile.dev
+++ b/services/gastown/container/Dockerfile.dev
@@ -4,7 +4,7 @@ FROM --platform=linux/arm64 oven/bun:1-slim
# Package categories:
# version control: git, git-lfs
# network/download: curl, wget, ca-certificates, gnupg, unzip
-# build toolchain: build-essential, autoconf
+# build toolchain: build-essential, autoconf, cmake, pkg-config
# search tools: ripgrep, jq
# compression: bzip2, zstd
# SSL/crypto: libssl-dev, libffi-dev
@@ -16,6 +16,7 @@ FROM --platform=linux/arm64 oven/bun:1-slim
# C++ stdlib: libc++1
# math: libgmp-dev
# timezone data: tzdata
+# Java: default-jdk
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git \
@@ -27,6 +28,8 @@ RUN apt-get update && \
unzip \
build-essential \
autoconf \
+ cmake \
+ pkg-config \
ripgrep \
jq \
bzip2 \
@@ -47,6 +50,7 @@ RUN apt-get update && \
libc++1 \
libgmp-dev \
tzdata \
+ default-jdk \
&& curl -fsSL https://deb.nodesource.com/setup_24.x | bash - \
&& apt-get install -y --no-install-recommends nodejs \
&& curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
@@ -67,7 +71,7 @@ RUN git lfs install --system
# pick either depending on the detected libc. bun:1-slim is Debian (glibc)
# but the resolver sometimes misdetects; installing both is safe.
# Also install pnpm — many projects use it as their package manager.
-RUN npm install -g @kilocode/cli @kilocode/cli-linux-arm64 @kilocode/cli-linux-arm64-musl pnpm && \
+RUN npm install -g @kilocode/cli@7.2.14 @kilocode/cli-linux-arm64@7.2.14 @kilocode/cli-linux-arm64-musl@7.2.14 @kilocode/plugin@7.2.14 pnpm && \
ln -s "$(which kilo)" /usr/local/bin/opencode
# Create non-root user for defense-in-depth
diff --git a/services/gastown/container/package.json b/services/gastown/container/package.json
index 493808087a..197a4eb38d 100644
--- a/services/gastown/container/package.json
+++ b/services/gastown/container/package.json
@@ -12,8 +12,8 @@
"lint": "pnpm -w exec oxlint --config .oxlintrc.json services/gastown/container/src"
},
"dependencies": {
- "@kilocode/plugin": "7.1.23",
- "@kilocode/sdk": "7.1.23",
+ "@kilocode/plugin": "7.2.14",
+ "@kilocode/sdk": "7.2.14",
"hono": "catalog:",
"zod": "catalog:"
},
diff --git a/services/gastown/container/plugin/client.ts b/services/gastown/container/plugin/client.ts
index 6a222b7b41..e4a3be97ef 100644
--- a/services/gastown/container/plugin/client.ts
+++ b/services/gastown/container/plugin/client.ts
@@ -409,6 +409,7 @@ export class MayorGastownClient {
status?: 'open' | 'in_progress' | 'in_review' | 'closed' | 'failed';
priority?: 'low' | 'medium' | 'high' | 'critical';
labels?: string[];
+ depends_on?: string[];
}
): Promise {
return this.request(this.mayorPath(`/rigs/${rigId}/beads/${beadId}`), {
@@ -417,6 +418,27 @@ export class MayorGastownClient {
});
}
+ async convoyAddBead(
+ convoyId: string,
+ beadId: string,
+ dependsOn?: string[]
+ ): Promise<{ total_beads: number }> {
+ return this.request<{ total_beads: number }>(this.mayorPath(`/convoys/${convoyId}/add-bead`), {
+ method: 'POST',
+ body: JSON.stringify({ bead_id: beadId, depends_on: dependsOn }),
+ });
+ }
+
+ async convoyRemoveBead(convoyId: string, beadId: string): Promise<{ total_beads: number }> {
+ return this.request<{ total_beads: number }>(
+ this.mayorPath(`/convoys/${convoyId}/remove-bead`),
+ {
+ method: 'POST',
+ body: JSON.stringify({ bead_id: beadId }),
+ }
+ );
+ }
+
async reassignBead(rigId: string, beadId: string, agentId: string): Promise {
return this.request(this.mayorPath(`/rigs/${rigId}/beads/${beadId}/reassign`), {
method: 'POST',
@@ -430,6 +452,27 @@ export class MayorGastownClient {
});
}
+ async deleteBeads(rigId: string, beadIds: string[]): Promise<{ deleted: number }> {
+ return this.request<{ deleted: number }>(this.mayorPath(`/rigs/${rigId}/beads/bulk-delete`), {
+ method: 'POST',
+ body: JSON.stringify({ bead_ids: beadIds }),
+ });
+ }
+
+ async deleteBeadsByStatus(
+ rigId: string,
+ status: 'open' | 'in_progress' | 'in_review' | 'closed' | 'failed',
+ type?: string
+ ): Promise<{ deleted: number }> {
+ return this.request<{ deleted: number }>(
+ this.mayorPath(`/rigs/${rigId}/beads/delete-by-status`),
+ {
+ method: 'POST',
+ body: JSON.stringify({ status, ...(type ? { type } : {}) }),
+ }
+ );
+ }
+
async resetAgent(rigId: string, agentId: string): Promise<void> {
await this.request(this.mayorPath(`/rigs/${rigId}/agents/${agentId}/reset`), {
method: 'POST',
diff --git a/services/gastown/container/plugin/mayor-tools.ts b/services/gastown/container/plugin/mayor-tools.ts
index 09b28ded47..85d08e94e1 100644
--- a/services/gastown/container/plugin/mayor-tools.ts
+++ b/services/gastown/container/plugin/mayor-tools.ts
@@ -292,7 +292,7 @@ export function createMayorTools(client: MayorGastownClient) {
}),
gt_bead_update: tool({
- description: "Edit a bead's status, title, body, priority, or labels.",
+ description: "Edit a bead's status, title, body, priority, labels, or dependency blockers.",
args: {
rig_id: tool.schema.string().describe('The UUID of the rig the bead belongs to'),
bead_id: tool.schema.string().describe('The UUID of the bead to update'),
@@ -310,6 +310,13 @@ export function createMayorTools(client: MayorGastownClient) {
.array(tool.schema.string())
.describe('Replacement labels array for the bead')
.optional(),
+ depends_on: tool.schema
+ .array(tool.schema.string())
+ .describe(
+ "Replace this bead's blockers. Pass an array of bead UUIDs that must be closed before this bead can be dispatched. " +
+ 'Pass an empty array [] to remove all blockers. Omit to leave dependencies unchanged.'
+ )
+ .optional(),
},
async execute(args) {
const bead = await client.updateBead(args.rig_id, args.bead_id, {
@@ -318,6 +325,7 @@ export function createMayorTools(client: MayorGastownClient) {
status: args.status,
priority: args.priority,
labels: args.labels,
+ depends_on: args.depends_on,
});
return `Bead ${bead.bead_id} updated. Status: ${bead.status}, Priority: ${bead.priority}, Title: "${bead.title}".`;
},
@@ -337,14 +345,22 @@ export function createMayorTools(client: MayorGastownClient) {
}),
gt_bead_delete: tool({
- description: 'Delete a bead. Use with caution — this is irreversible.',
+ description:
+ 'Delete one or more beads. Use with caution — this is irreversible. Pass a single UUID string or an array of UUIDs to bulk-delete up to 5000 at once.',
args: {
- rig_id: tool.schema.string().describe('The UUID of the rig the bead belongs to'),
- bead_id: tool.schema.string().describe('The UUID of the bead to delete'),
+ rig_id: tool.schema.string().describe('The UUID of the rig the bead(s) belong to'),
+ bead_id: tool.schema
+ .union([tool.schema.string(), tool.schema.array(tool.schema.string())])
+ .describe('A single bead UUID or an array of bead UUIDs to delete'),
},
async execute(args) {
- await client.deleteBead(args.rig_id, args.bead_id);
- return `Bead ${args.bead_id} deleted.`;
+ const ids = Array.isArray(args.bead_id) ? args.bead_id : [args.bead_id];
+ if (ids.length === 1 && ids[0]) {
+ await client.deleteBead(args.rig_id, ids[0]);
+ return `Bead ${ids[0]} deleted.`;
+ }
+ const result = await client.deleteBeads(args.rig_id, ids);
+ return `Deleted ${result.deleted} beads.`;
},
}),
@@ -371,6 +387,40 @@ export function createMayorTools(client: MayorGastownClient) {
},
}),
+ gt_convoy_add_bead: tool({
+ description:
+ 'Add an existing bead to an existing convoy. Use this after gt_sling to make a standalone bead ' +
+ "part of a convoy's tracked progress and landing. The bead will count toward the convoy's " +
+ 'completion and will be included in the convoy landing.',
+ args: {
+ convoy_id: tool.schema.string().describe('UUID of the convoy to add the bead to'),
+ bead_id: tool.schema.string().describe('UUID of the bead to add'),
+ depends_on: tool.schema
+ .array(tool.schema.string())
+ .describe('Optional: bead UUIDs that must complete before this bead is dispatched')
+ .optional(),
+ },
+ async execute(args) {
+ const result = await client.convoyAddBead(args.convoy_id, args.bead_id, args.depends_on);
+ return `Bead ${args.bead_id} added to convoy ${args.convoy_id}. Convoy now tracking ${result.total_beads} beads.`;
+ },
+ }),
+
+ gt_convoy_remove_bead: tool({
+ description:
+ 'Remove a bead from a convoy. The bead will no longer count toward convoy progress or landing. ' +
+ 'Dependency edges between this bead and other convoy beads are also removed. ' +
+ 'The bead itself is not deleted — it becomes a standalone bead.',
+ args: {
+ convoy_id: tool.schema.string().describe('UUID of the convoy'),
+ bead_id: tool.schema.string().describe('UUID of the bead to remove from the convoy'),
+ },
+ async execute(args) {
+ const result = await client.convoyRemoveBead(args.convoy_id, args.bead_id);
+ return `Bead ${args.bead_id} removed from convoy ${args.convoy_id}. Convoy now tracking ${result.total_beads} beads.`;
+ },
+ }),
+
gt_convoy_update: tool({
description: 'Edit convoy metadata (merge_mode, feature_branch).',
args: {
diff --git a/services/gastown/container/plugin/package.json b/services/gastown/container/plugin/package.json
index 899ebb7615..9c89e81375 100644
--- a/services/gastown/container/plugin/package.json
+++ b/services/gastown/container/plugin/package.json
@@ -6,8 +6,8 @@
"description": "Kilo plugin exposing Gastown tools to agents",
"main": "index.ts",
"dependencies": {
- "@kilocode/plugin": "7.1.23",
- "@kilocode/sdk": "7.1.23",
+ "@kilocode/plugin": "7.2.14",
+ "@kilocode/sdk": "7.2.14",
"zod": "^4.3.5"
}
}
diff --git a/services/gastown/container/src/agent-runner.ts b/services/gastown/container/src/agent-runner.ts
index e13d7e9d90..9ce5618dc8 100644
--- a/services/gastown/container/src/agent-runner.ts
+++ b/services/gastown/container/src/agent-runner.ts
@@ -4,6 +4,7 @@ import { writeFile } from 'node:fs/promises';
import { cloneRepo, createWorktree, setupRigBrowseWorktree } from './git-manager';
import { startAgent } from './process-manager';
import { getCurrentTownConfig } from './control-server';
+import { log } from './logger';
import type { ManagedAgent, StartAgentRequest } from './types';
/**
@@ -471,6 +472,7 @@ export async function writeMayorSystemPromptToAgentsMd(
export async function runAgent(originalRequest: StartAgentRequest): Promise {
let request = originalRequest;
let workdir: string;
+ const t0 = Date.now();
if (request.role === 'triage' || request.lightweight) {
// Triage/lightweight agents are pure reasoning — no code changes, no git needed.
@@ -570,6 +572,12 @@ export async function runAgent(originalRequest: StartAgentRequest): Promise | null = null;
+// Track which custom env var keys were applied last sync so removed keys can be cleared.
+let lastAppliedEnvVarKeys = new Set<string>();
+
+// Env keys managed by the control plane that custom env_vars must never override.
+// If a custom key collides with a reserved key, the infra value wins and the
+// custom value is silently ignored — matching the !(key in env) guard in buildAgentEnv.
+export const RESERVED_ENV_KEYS = new Set([
+ 'KILOCODE_TOKEN',
+ 'GIT_TOKEN',
+ 'GITHUB_TOKEN',
+ 'GITLAB_TOKEN',
+ 'GITLAB_INSTANCE_URL',
+ 'GITHUB_CLI_PAT',
+ 'GH_TOKEN',
+ 'GASTOWN_GIT_AUTHOR_NAME',
+ 'GASTOWN_GIT_AUTHOR_EMAIL',
+ 'GASTOWN_DISABLE_AI_COAUTHOR',
+ 'GASTOWN_ORGANIZATION_ID',
+ 'GASTOWN_CONTAINER_TOKEN',
+ 'GASTOWN_SESSION_TOKEN',
+ 'GASTOWN_API_URL',
+ // Runtime routing vars read by pending-nudge routes and plugin clients —
+ // must never be overwritten by user-supplied env_vars.
+ 'GASTOWN_TOWN_ID',
+ 'GASTOWN_RIG_ID',
+]);
+
/** Get the latest town config delivered via X-Town-Config header. */
export function getCurrentTownConfig(): Record<string, unknown> | null {
return lastKnownTownConfig;
}
+/** Get the set of custom env var keys applied in the last sync. */
+export function getLastAppliedEnvVarKeys(): Set<string> {
+ return lastAppliedEnvVarKeys;
+}
+
/**
* Sync config-derived env vars from the last-known town config into
* process.env. Safe to call at any time — no-ops when no config is cached.
@@ -102,6 +136,27 @@ function syncTownConfigToProcessEnv(): void {
} else {
delete process.env.GASTOWN_ORGANIZATION_ID;
}
+
+ // Apply custom env_vars from the town config. Reserved infra keys are
+ // skipped so the control-plane values always take precedence — matching the
+ // !(key in env) guard in buildAgentEnv.
+ const rawEnvVars = cfg.env_vars;
+  const customEnvVars: Record<string, unknown> =
+    rawEnvVars !== null && typeof rawEnvVars === 'object' && !Array.isArray(rawEnvVars)
+      ? (rawEnvVars as Record<string, unknown>)
+      : {};
+ const newCustomKeys = new Set(Object.keys(customEnvVars));
+ // Remove keys that were present in the previous sync but are gone now.
+ // Skip reserved keys — deleting those would wipe a control-plane value.
+ for (const key of lastAppliedEnvVarKeys) {
+ if (!newCustomKeys.has(key) && !RESERVED_ENV_KEYS.has(key)) delete process.env[key];
+ }
+ // Apply current custom env vars, skipping reserved keys.
+ for (const [key, value] of Object.entries(customEnvVars)) {
+ if (RESERVED_ENV_KEYS.has(key)) continue;
+ process.env[key] = String(value);
+ }
+ lastAppliedEnvVarKeys = newCustomKeys;
}
export const app = new Hono();
@@ -165,6 +220,7 @@ app.get('/health', c => {
servers: activeServerCount(),
uptime: getUptime(),
draining: isDraining() || undefined,
+ startedAt: getStartTime(),
};
return c.json(response);
});
@@ -471,9 +527,9 @@ app.post('/repos/setup', async c => {
// POST /git/merge
// Deterministic merge of a polecat branch into the target branch.
-// Called by the Rig DO's processReviewQueue → startMergeInContainer.
-// Runs the merge synchronously and reports the result back to the Rig DO
-// via a callback to the completeReview endpoint.
+// Called by the TownDO's startMergeInContainer.
+// Runs the merge synchronously and reports the result back via a callback
+// to the completeReview endpoint.
app.post('/git/merge', async c => {
const body: unknown = await c.req.json().catch(() => null);
const parsed = MergeRequest.safeParse(body);
@@ -539,7 +595,7 @@ app.post('/git/merge', async c => {
}
};
- // Fire and forget — the Rig DO will time out stuck entries via recoverStuckReviews
+ // Fire and forget — the TownDO will time out stuck entries via its alarm loop
doMerge().catch(err => {
console.error(`Merge failed for entry ${req.entryId}:`, err);
});
@@ -685,6 +741,15 @@ app.post('/agents/:agentId/pty', async c => {
console.log(
`[control-server] Reusing existing PTY session ${running.id} for agent ${agentId}`
);
+ const reuseAgent = getAgentStatus(agentId);
+ if (reuseAgent) {
+ log.info('agent.pty_connected', {
+ agentId,
+ containerUptimeMs: getUptime(),
+ agentUptimeMs: Date.now() - new Date(reuseAgent.startedAt).getTime(),
+ reused: true,
+ });
+ }
return c.json(running);
}
}
@@ -737,6 +802,14 @@ app.post('/agents/:agentId/pty', async c => {
console.log(
`[control-server] Created new PTY session for agent ${agentId}: ${data.slice(0, 200)}`
);
+ if (createResp.ok) {
+ log.info('agent.pty_connected', {
+ agentId,
+ containerUptimeMs: getUptime(),
+ agentUptimeMs: Date.now() - new Date(agent.startedAt).getTime(),
+ reused: false,
+ });
+ }
return new Response(data, {
status: createResp.status,
headers: { 'Content-Type': 'application/json' },
diff --git a/services/gastown/container/src/main.ts b/services/gastown/container/src/main.ts
index d1ed16ef43..5b12a1aa19 100644
--- a/services/gastown/container/src/main.ts
+++ b/services/gastown/container/src/main.ts
@@ -1,8 +1,8 @@
import { startControlServer } from './control-server';
import { log } from './logger';
-import { bootHydration } from './process-manager';
+import { bootHydration, getUptime } from './process-manager';
-log.info('container.cold_start', { uptime: 0, ts: new Date().toISOString() });
+log.info('container.cold_start', { uptime: getUptime(), ts: new Date().toISOString() });
process.on('uncaughtException', err => {
log.error('container.uncaught_exception', { error: err.message, stack: err.stack });
diff --git a/services/gastown/container/src/process-manager.ts b/services/gastown/container/src/process-manager.ts
index 77aa4bb411..f2dea4be01 100644
--- a/services/gastown/container/src/process-manager.ts
+++ b/services/gastown/container/src/process-manager.ts
@@ -12,6 +12,11 @@ import * as fs from 'node:fs/promises';
import type { ManagedAgent, StartAgentRequest } from './types';
import { reportAgentCompleted, reportMayorWaiting } from './completion-reporter';
import { buildKiloConfigContent } from './agent-runner';
+import {
+ getCurrentTownConfig,
+ getLastAppliedEnvVarKeys,
+ RESERVED_ENV_KEYS,
+} from './control-server';
import { log } from './logger';
const MANAGER_LOG = '[process-manager]';
@@ -69,6 +74,10 @@ export function getUptime(): number {
return Date.now() - startTime;
}
+export function getStartTime(): string {
+ return new Date(startTime).toISOString();
+}
+
async function hydrateDbFromSnapshot(
agentId: string,
apiUrl: string,
@@ -108,6 +117,108 @@ async function hydrateDbFromSnapshot(
}
}
+async function deleteLocalDb(agentId: string): Promise<void> {
+ const dir = `/tmp/agent-home-${agentId}/.local/share/kilo`;
+ for (const suffix of ['kilo.db', 'kilo.db-wal', 'kilo.db-shm']) {
+ try {
+ await fs.unlink(`${dir}/${suffix}`);
+ } catch {
+ // File may not exist — that's fine.
+ }
+ }
+ console.log(`${MANAGER_LOG} Deleted local kilo.db for agent ${agentId}`);
+}
+
+async function deleteRemoteDbSnapshot(
+ agentId: string,
+ apiUrl: string,
+ token: string,
+ rigId: string,
+ townId: string
+): Promise<void> {
+ try {
+ const resp = await fetch(
+ `${apiUrl}/api/towns/${townId}/rigs/${rigId}/agents/${agentId}/db-snapshot`,
+ { method: 'DELETE', headers: { Authorization: `Bearer ${token}` } }
+ );
+ if (resp.ok) {
+ console.log(`${MANAGER_LOG} Deleted remote DB snapshot for agent ${agentId}`);
+ } else {
+ console.warn(
+ `${MANAGER_LOG} Failed to delete remote DB snapshot for ${agentId}: ${resp.status}`
+ );
+ }
+ } catch (err) {
+ console.warn(`${MANAGER_LOG} deleteRemoteDbSnapshot failed for ${agentId}:`, err);
+ }
+}
+
+/**
+ * Try session.create; if it fails (e.g. stale kilo.db from an older CLI
+ * version whose schema is incompatible), delete the local DB, tear down
+ * the SDK server, restart it fresh, and retry once.
+ */
+async function createSessionWithStaleDbFallback(
+ client: KiloClient,
+ workdir: string,
+  env: Record<string, string>,
+ agentId: string,
+ agent: ManagedAgent
+): Promise<string> {
+ const sessionResult = await client.session.create({ body: {} });
+ const rawSession: unknown = sessionResult.data ?? sessionResult;
+ const parsed = SessionResponse.safeParse(rawSession);
+ if (parsed.success) {
+ console.log(`${MANAGER_LOG} Created new session ${parsed.data.id}`);
+ return parsed.data.id;
+ }
+
+ // session.create failed — likely a stale kilo.db migration error.
+ const rawStr = JSON.stringify(rawSession).slice(0, 300);
+ console.warn(
+ `${MANAGER_LOG} session.create failed for ${agentId}, attempting stale DB recovery. Response: ${rawStr}`
+ );
+
+ // 1. Delete local kilo.db so the CLI starts with a fresh schema.
+ await deleteLocalDb(agentId);
+
+ // 2. Tear down the SDK server so ensureSDKServer creates a new one.
+ const instance = sdkInstances.get(workdir);
+ if (instance) {
+ instance.server.close();
+ sdkInstances.delete(workdir);
+ }
+
+ // 3. Delete the stale KV snapshot (fire-and-forget) so future container
+ // restarts don't re-hydrate the broken DB.
+ const apiUrl = agent.gastownApiUrl;
+ const token = agent.gastownContainerToken ?? process.env.GASTOWN_CONTAINER_TOKEN ?? null;
+ if (apiUrl && token) {
+ void deleteRemoteDbSnapshot(agentId, apiUrl, token, agent.rigId, agent.townId);
+ }
+
+ // 4. Restart SDK server and retry session.create.
+ const { client: freshClient, port } = await ensureSDKServer(workdir, env);
+ agent.serverPort = port;
+
+ const retryResult = await freshClient.session.create({ body: {} });
+ const retryRaw: unknown = retryResult.data ?? retryResult;
+ const retryParsed = SessionResponse.safeParse(retryRaw);
+ if (!retryParsed.success) {
+ console.error(
+ `${MANAGER_LOG} session.create still failing after DB reset:`,
+ JSON.stringify(retryRaw).slice(0, 200),
+ retryParsed.error.issues
+ );
+ throw new Error('SDK session.create failed even after stale DB recovery');
+ }
+
+ console.log(
+ `${MANAGER_LOG} Stale DB recovery succeeded for ${agentId}, new session ${retryParsed.data.id}`
+ );
+ return retryParsed.data.id;
+}
+
async function saveDbSnapshot(
agentId: string,
apiUrl: string,
@@ -836,6 +947,7 @@ export async function startAgent(
const { signal } = startupAbortController;
let sessionCounted = false;
+ const t0 = Date.now();
try {
// 0. Hydrate agent DB from KV snapshot before starting the SDK server
const apiUrl = agent.gastownApiUrl;
@@ -843,10 +955,23 @@ export async function startAgent(
if (apiUrl && token) {
await hydrateDbFromSnapshot(request.agentId, apiUrl, token, request.rigId, request.townId);
}
+ const tDbDone = Date.now();
+ log.info('agent.startup_phase', {
+ agentId: request.agentId,
+ phase: 'db_hydrated',
+ elapsedMs: tDbDone - t0,
+ });
// 1. Ensure SDK server is running for this workdir
const { client, port } = await ensureSDKServer(workdir, env);
agent.serverPort = port;
+ const tSdkDone = Date.now();
+ log.info('agent.startup_phase', {
+ agentId: request.agentId,
+ phase: 'sdk_ready',
+ elapsedMs: tSdkDone - t0,
+ phaseMs: tSdkDone - tDbDone,
+ });
// Check if startup was cancelled while waiting for the SDK server
if (signal.aborted) {
@@ -885,21 +1010,23 @@ export async function startAgent(
}
}
if (!resumed) {
- const sessionResult = await client.session.create({ body: {} });
- const rawSession: unknown = sessionResult.data ?? sessionResult;
- const parsed = SessionResponse.safeParse(rawSession);
- if (!parsed.success) {
- console.error(
- `${MANAGER_LOG} SDK session.create returned unexpected shape:`,
- JSON.stringify(rawSession).slice(0, 200),
- parsed.error.issues
- );
- throw new Error('SDK session.create response missing required "id" field');
- }
- sessionId = parsed.data.id;
- console.log(`${MANAGER_LOG} Created new session ${sessionId}`);
+ sessionId = await createSessionWithStaleDbFallback(
+ client,
+ workdir,
+ env,
+ request.agentId,
+ agent
+ );
}
agent.sessionId = sessionId;
+ const tSessionDone = Date.now();
+ log.info('agent.startup_phase', {
+ agentId: request.agentId,
+ phase: 'session_created',
+ elapsedMs: tSessionDone - t0,
+ phaseMs: tSessionDone - tSdkDone,
+ resumed,
+ });
// Now check if startup was cancelled while creating the session.
// agent.sessionId is already set, so the catch block will abort it.
@@ -967,6 +1094,12 @@ export async function startAgent(
port,
});
+ log.info('agent.startup_complete', {
+ agentId: request.agentId,
+ totalMs: Date.now() - t0,
+ containerUptimeMs: getUptime(),
+ });
+
syncRegistry();
return agent;
} catch (err) {
@@ -1264,6 +1397,35 @@ export async function updateAgentModel(
if (live) hotSwapEnv[key] = live;
}
+ // Overlay custom env_vars from the town config so hot-swap picks up
+ // values that were added/changed after the initial dispatch. Infra
+ // keys in LIVE_ENV_KEYS and RESERVED_ENV_KEYS always take precedence
+ // (LIVE_ENV_KEYS were already populated from process.env above;
+ // RESERVED_ENV_KEYS are runtime routing vars that must never be clobbered).
+ const freshConfig = getCurrentTownConfig();
+ const freshEnvVars = freshConfig?.env_vars;
+ const freshCustomKeySet = new Set<string>();
+ if (freshEnvVars !== null && typeof freshEnvVars === 'object' && !Array.isArray(freshEnvVars)) {
+ for (const [key, value] of Object.entries(freshEnvVars as Record<string, unknown>)) {
+ if (LIVE_ENV_KEYS.has(key)) continue;
+ if (RESERVED_ENV_KEYS.has(key)) continue;
+ freshCustomKeySet.add(key);
+ if (value !== undefined && value !== null) {
+ hotSwapEnv[key] = typeof value === 'string' ? value : JSON.stringify(value);
+ } else {
+ delete hotSwapEnv[key];
+ }
+ }
+ }
+ // Remove stale custom env vars — keys that were applied in a previous
+ // sync but are no longer in the town config. Without this, startupEnv
+ // keeps carrying deleted custom keys through every hot-swap.
+ for (const key of getLastAppliedEnvVarKeys()) {
+ if (!freshCustomKeySet.has(key) && !LIVE_ENV_KEYS.has(key)) {
+ delete hotSwapEnv[key];
+ }
+ }
+
// Re-derive GH_TOKEN from live values using the same priority chain
// as buildAgentEnv: GITHUB_CLI_PAT > GIT_TOKEN > GITHUB_TOKEN.
// syncConfigToContainer updates these on process.env, but buildAgentEnv
@@ -1304,13 +1466,13 @@ export async function updateAgentModel(
}
}
if (!resumedSession) {
- const sessionResult = await client.session.create({ body: {} });
- const rawSession: unknown = sessionResult.data ?? sessionResult;
- const parsed = SessionResponse.safeParse(rawSession);
- if (!parsed.success) {
- throw new Error('SDK session.create response missing required "id" field');
- }
- newSessionId = parsed.data.id;
+ newSessionId = await createSessionWithStaleDbFallback(
+ client,
+ agent.workdir,
+ hotSwapEnv,
+ agentId,
+ agent
+ );
}
agent.sessionId = newSessionId;
diff --git a/services/gastown/container/src/types.ts b/services/gastown/container/src/types.ts
index cc74ae96c6..d09a1e53c6 100644
--- a/services/gastown/container/src/types.ts
+++ b/services/gastown/container/src/types.ts
@@ -164,6 +164,7 @@ export type HealthResponse = {
servers: number;
uptime: number;
draining?: boolean;
+ startedAt?: string;
};
// ── Kilo serve instance ─────────────────────────────────────────────────
diff --git a/services/gastown/gastown-grafana-dash-1.json b/services/gastown/gastown-grafana-dash-1.json
index e538890720..ee44a3ec0b 100644
--- a/services/gastown/gastown-grafana-dash-1.json
+++ b/services/gastown/gastown-grafana-dash-1.json
@@ -3173,6 +3173,833 @@
],
"title": "Pending Event Queue Depth",
"type": "gauge"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 129
+ },
+ "id": 300,
+ "panels": [],
+ "title": "Container Startup Latency",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "bffxugc31cnpcc"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": 0
+ },
+ {
+ "color": "yellow",
+ "value": 500
+ },
+ {
+ "color": "red",
+ "value": 2000
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 6,
+ "x": 0,
+ "y": 130
+ },
+ "id": 303,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "12.4.1",
+ "targets": [
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "table",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT SUM(_sample_interval * double1) / SUM(_sample_interval) AS avg_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok'",
+ "rawSql": "SELECT SUM(_sample_interval * double1) / SUM(_sample_interval) AS avg_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok'",
+ "refId": "A",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ }
+ ],
+ "timeFrom": "1h",
+ "title": "Avg Health Ping (1h)",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "bffxugc31cnpcc"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": 0
+ },
+ {
+ "color": "yellow",
+ "value": 1000
+ },
+ {
+ "color": "red",
+ "value": 5000
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 4,
+ "w": 6,
+ "x": 6,
+ "y": 130
+ },
+ "id": 304,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "12.4.1",
+ "targets": [
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "table",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT SUM(_sample_interval * double1) / SUM(_sample_interval) AS avg_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true'",
+ "rawSql": "SELECT SUM(_sample_interval * double1) / SUM(_sample_interval) AS avg_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true'",
+ "refId": "A",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ }
+ ],
+ "timeFrom": "1h",
+ "title": "Avg Agent Start Fetch (1h)",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "bffxugc31cnpcc"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 15,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "smooth",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "showValues": false,
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": 0
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "timeout_rate"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "percentunit"
+ },
+ {
+ "id": "min",
+ "value": 0
+ },
+ {
+ "id": "max",
+ "value": 1
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 134
+ },
+ "id": 301,
+ "options": {
+ "legend": {
+ "calcs": ["mean", "max"],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "12.4.1",
+ "targets": [
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'p50' AS label, quantileWeighted(0.50)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'p50' AS label, quantileWeighted(0.50)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok' GROUP BY t ORDER BY t",
+ "refId": "A",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ },
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'p90' AS label, quantileWeighted(0.90)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'p90' AS label, quantileWeighted(0.90)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok' GROUP BY t ORDER BY t",
+ "refId": "B",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ },
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'p99' AS label, quantileWeighted(0.99)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'p99' AS label, quantileWeighted(0.99)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' AND blob8 = 'ok' GROUP BY t ORDER BY t",
+ "refId": "C",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ },
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'timeout_rate' AS label, SUM(IF(blob8 = 'timeout', _sample_interval, 0)) / SUM(_sample_interval) AS timeout_rate FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'timeout_rate' AS label, SUM(IF(blob8 = 'timeout', _sample_interval, 0)) / SUM(_sample_interval) AS timeout_rate FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' GROUP BY t ORDER BY t",
+ "refId": "D",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ }
+ ],
+ "title": "Container Health Ping Latency (cold-start indicator)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "bffxugc31cnpcc"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 15,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "smooth",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "showValues": false,
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": 0
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failure_rate"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "percentunit"
+ },
+ {
+ "id": "min",
+ "value": 0
+ },
+ {
+ "id": "max",
+ "value": 1
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 134
+ },
+ "id": 302,
+ "options": {
+ "legend": {
+ "calcs": ["mean", "max"],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "12.4.1",
+ "targets": [
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'p50' AS label, quantileWeighted(0.50)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'p50' AS label, quantileWeighted(0.50)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true' GROUP BY t ORDER BY t",
+ "refId": "A",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ },
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'p90' AS label, quantileWeighted(0.90)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'p90' AS label, quantileWeighted(0.90)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true' GROUP BY t ORDER BY t",
+ "refId": "B",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ },
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'p99' AS label, quantileWeighted(0.99)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'p99' AS label, quantileWeighted(0.99)(double1, _sample_interval) AS latency_ms FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' AND blob9 = 'true' GROUP BY t ORDER BY t",
+ "refId": "C",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ },
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'failure_rate' AS label, SUM(IF(blob9 = 'false', _sample_interval, 0)) / SUM(_sample_interval) AS failure_rate FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'failure_rate' AS label, SUM(IF(blob9 = 'false', _sample_interval, 0)) / SUM(_sample_interval) AS failure_rate FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' GROUP BY t ORDER BY t",
+ "refId": "D",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ }
+ ],
+ "title": "Container Agent Start Latency (Town DO \u2192 container.fetch('/agents/start') round-trip)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "bffxugc31cnpcc"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 15,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "smooth",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "showValues": false,
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": 0
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 142
+ },
+ "id": 305,
+ "options": {
+ "legend": {
+ "calcs": ["mean", "max"],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "12.4.1",
+ "targets": [
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, 'timeout_rate' AS label, SUM(IF(blob8 = 'timeout', _sample_interval, 0)) / SUM(_sample_interval) AS rate FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' GROUP BY t ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, 'timeout_rate' AS label, SUM(IF(blob8 = 'timeout', _sample_interval, 0)) / SUM(_sample_interval) AS rate FROM gastown_events WHERE $timeFilter AND blob1 = 'container.health_ping' GROUP BY t ORDER BY t",
+ "refId": "A",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ }
+ ],
+ "title": "Health Ping Timeout Rate (container cold-start frequency)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "bffxugc31cnpcc"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "bars",
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 0,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "showValues": false,
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": 0
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 142
+ },
+ "id": 306,
+ "options": {
+ "legend": {
+ "calcs": ["sum"],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "12.4.1",
+ "targets": [
+ {
+ "adHocFilters": [],
+ "adHocValuesQuery": "",
+ "add_metadata": true,
+ "contextWindowSize": "10",
+ "dateTimeColDataType": "timestamp",
+ "dateTimeType": "DATETIME",
+ "editorMode": "sql",
+ "extrapolate": true,
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "nullifySparse": false,
+ "query": "SELECT $timeSeries AS t, IF(blob9 = 'true', 'success', 'failure') AS label, SUM(_sample_interval) AS count FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' GROUP BY t, label ORDER BY t",
+ "rawSql": "SELECT $timeSeries AS t, IF(blob9 = 'true', 'success', 'failure') AS label, SUM(_sample_interval) AS count FROM gastown_events WHERE $timeFilter AND blob1 = 'container.agent_start_fetch' GROUP BY t, label ORDER BY t",
+ "refId": "A",
+ "round": "0s",
+ "showFormattedSQL": false,
+ "showHelp": false,
+ "skip_comments": true,
+ "table": "gastown_events",
+ "useWindowFuncForMacros": true
+ }
+ ],
+ "title": "Agent Start Attempts (success / failure)",
+ "type": "timeseries"
}
],
"preload": false,
diff --git a/services/gastown/src/db/tables/town-events.table.ts b/services/gastown/src/db/tables/town-events.table.ts
index 436aacecae..72771b8613 100644
--- a/services/gastown/src/db/tables/town-events.table.ts
+++ b/services/gastown/src/db/tables/town-events.table.ts
@@ -13,6 +13,7 @@ export const TownEventType = z.enum([
'nudge_timeout',
'pr_feedback_detected',
'pr_auto_merge',
+ 'pr_conflict_detected',
]);
export type TownEventType = z.output<typeof TownEventType>;
diff --git a/services/gastown/src/dos/Town.do.ts b/services/gastown/src/dos/Town.do.ts
index 75a654ccf1..e1a3155f33 100644
--- a/services/gastown/src/dos/Town.do.ts
+++ b/services/gastown/src/dos/Town.do.ts
@@ -72,6 +72,7 @@ import type {
BeadFilter,
Bead,
BeadStatus,
+ BeadType as BeadTypeType,
BeadPriority as BeadPriorityType,
RegisterAgentInput,
AgentFilter,
@@ -80,7 +81,6 @@ import type {
SendMailInput,
Mail,
ReviewQueueInput,
- ReviewQueueEntry,
AgentDoneInput,
PrimeContext,
Molecule,
@@ -343,6 +343,17 @@ export class TownDO extends DurableObject {
});
}
+ // Option B (defense-in-depth): If the reconciler re-dispatches an
+ // open triage batch bead (gt:triage, created_by='patrol') — e.g.
+ // because Option A's in_progress transition was somehow bypassed —
+ // inject the triage system prompt so the polecat gets the correct
+ // tools and instructions instead of the generic polecat prompt.
+ if (bead.labels.includes(patrol.TRIAGE_BATCH_LABEL) && bead.created_by === 'patrol') {
+ const pendingRequests = patrol.listPendingTriageRequests(this.sql);
+ const { buildTriageSystemPrompt } = await import('../prompts/triage-system.prompt');
+ systemPromptOverride = buildTriageSystemPrompt(pendingRequests);
+ }
+
return scheduling.dispatchAgent(schedulingCtx, agent, bead, {
systemPromptOverride,
});
@@ -786,6 +797,53 @@ export class TownDO extends DurableObject {
}
}
+ // Persist custom env_vars to DO storage so they survive container restarts.
+ // Compare against the previously-persisted set of keys to clear removed ones.
+ // Reserved infra keys are never overwritten or deleted — infra values always win.
+ const RESERVED_ENV_KEYS = new Set([
+ 'KILOCODE_TOKEN',
+ 'GIT_TOKEN',
+ 'GITHUB_TOKEN',
+ 'GITLAB_TOKEN',
+ 'GITLAB_INSTANCE_URL',
+ 'GITHUB_CLI_PAT',
+ 'GH_TOKEN',
+ 'GASTOWN_GIT_AUTHOR_NAME',
+ 'GASTOWN_GIT_AUTHOR_EMAIL',
+ 'GASTOWN_DISABLE_AI_COAUTHOR',
+ 'GASTOWN_ORGANIZATION_ID',
+ 'GASTOWN_CONTAINER_TOKEN',
+ 'GASTOWN_SESSION_TOKEN',
+ 'GASTOWN_API_URL',
+ ]);
+ const CUSTOM_ENV_KEYS_STORAGE_KEY = 'container:custom_env_var_keys';
+ const prevCustomKeys: string[] =
+ (await this.ctx.storage.get<string[]>(CUSTOM_ENV_KEYS_STORAGE_KEY)) ?? [];
+ const newCustomKeys = Object.keys(townConfig.env_vars).filter(
+ key => !RESERVED_ENV_KEYS.has(key)
+ );
+ const newCustomKeySet = new Set(newCustomKeys);
+
+ for (const key of prevCustomKeys) {
+ if (RESERVED_ENV_KEYS.has(key)) continue;
+ if (!newCustomKeySet.has(key)) {
+ try {
+ await container.deleteEnvVar(key);
+ } catch (err) {
+ console.warn(`[Town.do] syncConfigToContainer: delete custom ${key} failed:`, err);
+ }
+ }
+ }
+ for (const [key, value] of Object.entries(townConfig.env_vars)) {
+ if (RESERVED_ENV_KEYS.has(key)) continue;
+ try {
+ await container.setEnvVar(key, value);
+ } catch (err) {
+ console.warn(`[Town.do] syncConfigToContainer: set custom ${key} failed:`, err);
+ }
+ }
+ await this.ctx.storage.put(CUSTOM_ENV_KEYS_STORAGE_KEY, newCustomKeys);
+
// Phase 2: Push to the running container's process.env via the
// /sync-config endpoint. The X-Town-Config header delivers the
// full config; the endpoint applies CONFIG_ENV_MAP to process.env.
@@ -1076,6 +1134,33 @@ export class TownDO extends DurableObject {
beadOps.deleteBead(this.sql, beadId);
}
+ async deleteBeads(beadIds: string[]): Promise<number> {
+ return beadOps.deleteBeads(this.sql, beadIds);
+ }
+
+ async deleteBeadsByStatus(
+ status: BeadStatus,
+ type?: BeadTypeType,
+ rigId?: string
+ ): Promise<number> {
+ if (rigId) {
+ const rigBeads = BeadRecord.pick({ bead_id: true })
+ .array()
+ .parse([
+ ...this.sql.exec(
+ /* sql */ `SELECT ${beads.bead_id} FROM ${beads} WHERE ${beads.rig_id} = ? AND ${beads.status} = ?${type ? ` AND ${beads.type} = ?` : ''}`,
+ ...(type ? [rigId, status, type] : [rigId, status])
+ ),
+ ]);
+ if (rigBeads.length === 0) return 0;
+ return beadOps.deleteBeads(
+ this.sql,
+ rigBeads.map(r => r.bead_id)
+ );
+ }
+ return beadOps.deleteBeadsByStatus(this.sql, status, type);
+ }
+
async listBeadEvents(options: {
beadId?: string;
since?: string;
@@ -1098,6 +1183,7 @@ export class TownDO extends DurableObject {
labels: string[];
status: BeadStatus;
metadata: Record<string, unknown>;
+ depends_on: string[];
}>,
actorId: string
): Promise<Bead> {
@@ -1110,7 +1196,12 @@ export class TownDO extends DurableObject {
});
}
- const bead = beadOps.updateBeadFields(this.sql, beadId, fields, actorId);
+ const { depends_on, ...beadFields } = fields;
+ const bead = beadOps.updateBeadFields(this.sql, beadId, beadFields, actorId);
+
+ if (depends_on !== undefined) {
+ beadOps.setDependencies(this.sql, beadId, depends_on);
+ }
// When a bead closes via field update, check for newly unblocked beads
if (fields.status === 'closed' || fields.status === 'failed') {
@@ -1120,6 +1211,67 @@ export class TownDO extends DurableObject {
return bead;
}
+ /** Add an existing bead to a convoy's tracking. Returns updated convoy metadata. */
+ async convoyAddBead(
+ convoyId: string,
+ beadId: string,
+ dependsOn?: string[]
+ ): Promise<{ total_beads: number }> {
+ const convoyRecord = this.getConvoyRecord(convoyId);
+ if (!convoyRecord) throw new Error(`Bead ${convoyId} is not a convoy`);
+ const convoy = toConvoy(convoyRecord);
+ if (!convoy.staged) throw new Error(`Cannot add beads to a non-staged convoy: ${convoyId}`);
+ if (convoy.status === 'landed')
+ throw new Error(`Cannot add beads to a closed convoy: ${convoyId}`);
+ if (convoyRecord.status === 'failed')
+ throw new Error(`Cannot add beads to a failed convoy: ${convoyId}`);
+ beadOps.convoyAddBead(this.sql, convoyId, beadId);
+ if (dependsOn !== undefined) {
+ beadOps.setDependencies(this.sql, beadId, dependsOn);
+ }
+ const rows = [
+ ...query(
+ this.sql,
+ /* sql */ `
+ SELECT ${convoy_metadata.total_beads}
+ FROM ${convoy_metadata}
+ WHERE ${convoy_metadata.bead_id} = ?
+ `,
+ [convoyId]
+ ),
+ ];
+ const parsed = z.object({ total_beads: z.number() }).array().parse(rows);
+ const total = parsed[0]?.total_beads ?? 0;
+ return { total_beads: total };
+ }
+
+ /** Remove a bead from a convoy's tracking. Returns updated convoy metadata. */
+ async convoyRemoveBead(convoyId: string, beadId: string): Promise<{ total_beads: number }> {
+ const convoyCheck = [
+ ...query(
+ this.sql,
+ /* sql */ `SELECT 1 FROM ${convoy_metadata} WHERE ${convoy_metadata.bead_id} = ?`,
+ [convoyId]
+ ),
+ ];
+ if (convoyCheck.length === 0) throw new Error(`Bead ${convoyId} is not a convoy`);
+ beadOps.convoyRemoveBead(this.sql, convoyId, beadId);
+ const rows = [
+ ...query(
+ this.sql,
+ /* sql */ `
+ SELECT ${convoy_metadata.total_beads}
+ FROM ${convoy_metadata}
+ WHERE ${convoy_metadata.bead_id} = ?
+ `,
+ [convoyId]
+ ),
+ ];
+ const parsed = z.object({ total_beads: z.number() }).array().parse(rows);
+ const total = parsed[0]?.total_beads ?? 0;
+ return { total_beads: total };
+ }
+
/**
* Force-reset an agent to idle, unhooking from its current bead if any.
* Sets the bead status back to 'open' so it can be re-dispatched.
@@ -1670,14 +1822,6 @@ export class TownDO extends DurableObject {
await this.escalateToActiveCadence();
}
- async popReviewQueue(): Promise {
- return reviewQueue.popReviewQueue(this.sql);
- }
-
- async completeReview(entryId: string, status: 'merged' | 'failed'): Promise {
- reviewQueue.completeReview(this.sql, entryId, status);
- }
-
async completeReviewWithResult(input: {
entry_id: string;
status: 'merged' | 'failed' | 'conflict';
@@ -1712,10 +1856,9 @@ export class TownDO extends DurableObject {
});
}
- // Rework is handled by the normal scheduling path: the failed/conflict
+ // Rework is handled by the reconciler's scheduling path: the failed/conflict
// path in completeReviewWithResult sets the source bead to 'open' with
- // assignee cleared. feedStrandedConvoys or rehookOrphanedBeads will
- // hook a polecat, and schedulePendingWork will dispatch it.
+ // assignee cleared. The reconciler will hook a polecat and dispatch it.
}
async agentDone(agentId: string, input: AgentDoneInput): Promise<void> {
@@ -2362,29 +2505,33 @@ export class TownDO extends DurableObject {
}
}
- const started = await dispatch.startAgentInContainer(this.env, this.ctx.storage, {
- townId,
- rigId: `mayor-${townId}`,
- userId: townConfig.owner_user_id ?? rigConfig?.userId ?? townId,
- agentId: mayor.id,
- agentName: 'mayor',
- role: 'mayor',
- identity: mayor.identity,
- beadId: '',
- beadTitle: combinedMessage,
- beadBody: '',
- checkpoint: agents.readCheckpoint(this.sql, mayor.id),
- // conversationHistory is no longer needed — the mayor's kilo.db
- // is persisted to KV and hydrated on boot, preserving the full
- // session state across container evictions.
- gitUrl: rigConfig?.gitUrl ?? '',
- defaultBranch: rigConfig?.defaultBranch ?? 'main',
- kilocodeToken,
- townConfig,
- rigs: await this.rigListForMayor(),
- });
+ const { started: mayorStarted } = await dispatch.startAgentInContainer(
+ this.env,
+ this.ctx.storage,
+ {
+ townId,
+ rigId: `mayor-${townId}`,
+ userId: townConfig.owner_user_id ?? rigConfig?.userId ?? townId,
+ agentId: mayor.id,
+ agentName: 'mayor',
+ role: 'mayor',
+ identity: mayor.identity,
+ beadId: '',
+ beadTitle: combinedMessage,
+ beadBody: '',
+ checkpoint: agents.readCheckpoint(this.sql, mayor.id),
+ // conversationHistory is no longer needed — the mayor's kilo.db
+ // is persisted to KV and hydrated on boot, preserving the full
+ // session state across container evictions.
+ gitUrl: rigConfig?.gitUrl ?? '',
+ defaultBranch: rigConfig?.defaultBranch ?? 'main',
+ kilocodeToken,
+ townConfig,
+ rigs: await this.rigListForMayor(),
+ }
+ );
- if (started) {
+ if (mayorStarted) {
agents.updateAgentStatus(this.sql, mayor.id, 'working');
this._mayorWorkingSince = Date.now();
sessionStatus = 'starting';
@@ -2466,29 +2613,33 @@ export class TownDO extends DurableObject {
// Start with an empty prompt — the mayor will be idle but its container
// and SDK server will be running, ready for PTY connections.
- const started = await dispatch.startAgentInContainer(this.env, this.ctx.storage, {
- townId,
- rigId: `mayor-${townId}`,
- userId:
- townConfig.owner_user_id ?? rigConfig?.userId ?? townConfig.created_by_user_id ?? townId,
- agentId: mayor.id,
- agentName: 'mayor',
- role: 'mayor',
- identity: mayor.identity,
- beadId: '',
- beadTitle: 'Mayor ready. Waiting for instructions.',
- beadBody: '',
- checkpoint: agents.readCheckpoint(this.sql, mayor.id),
- // conversationHistory is no longer needed — kilo.db persistence
- // handles session continuity across container evictions.
- gitUrl: rigConfig?.gitUrl ?? '',
- defaultBranch: rigConfig?.defaultBranch ?? 'main',
- kilocodeToken,
- townConfig,
- rigs: await this.rigListForMayor(),
- });
+ const { started: mayorStarted } = await dispatch.startAgentInContainer(
+ this.env,
+ this.ctx.storage,
+ {
+ townId,
+ rigId: `mayor-${townId}`,
+ userId:
+ townConfig.owner_user_id ?? rigConfig?.userId ?? townConfig.created_by_user_id ?? townId,
+ agentId: mayor.id,
+ agentName: 'mayor',
+ role: 'mayor',
+ identity: mayor.identity,
+ beadId: '',
+ beadTitle: 'Mayor ready. Waiting for instructions.',
+ beadBody: '',
+ checkpoint: agents.readCheckpoint(this.sql, mayor.id),
+ // conversationHistory is no longer needed — kilo.db persistence
+ // handles session continuity across container evictions.
+ gitUrl: rigConfig?.gitUrl ?? '',
+ defaultBranch: rigConfig?.defaultBranch ?? 'main',
+ kilocodeToken,
+ townConfig,
+ rigs: await this.rigListForMayor(),
+ }
+ );
- if (started) {
+ if (mayorStarted) {
agents.updateAgentStatus(this.sql, mayor.id, 'working');
this._mayorWorkingSince = Date.now();
return { agentId: mayor.id, sessionStatus: 'starting' };
@@ -3300,6 +3451,14 @@ export class TownDO extends DurableObject {
return toConvoy(ConvoyBeadRecord.parse(rows[0]));
}
+ private getConvoyRecord(convoyId: string): ConvoyBeadRecord | null {
+ const rows = [
+ ...query(this.sql, /* sql */ `${CONVOY_JOIN} WHERE ${beads.bead_id} = ?`, [convoyId]),
+ ];
+ if (rows.length === 0) return null;
+ return ConvoyBeadRecord.parse(rows[0]);
+ }
+
// ══════════════════════════════════════════════════════════════════
// Escalations (beads with type='escalation' + escalation_metadata)
// ══════════════════════════════════════════════════════════════════
@@ -3558,9 +3717,9 @@ export class TownDO extends DurableObject {
}
// ── Pre-phase: Observe container status for working agents ────────
- // Replaces witnessPatrol's zombie detection. Poll the container for
- // each working/stalled agent and emit container_status events. These
- // are drained in Phase 0 and applied before reconciliation.
+ // Poll the container for each working/stalled agent and emit
+ // container_status events. These are drained in Phase 0 and applied
+ // before reconciliation.
try {
const workingAgentRows = z
.object({ bead_id: z.string() })
@@ -3624,6 +3783,11 @@ export class TownDO extends DurableObject {
pendingEventCount: 0,
};
+ // Fetch town config once and share across Phase 0 and Phase 1 so that
+ // applyEvent can use the full fallback chain (rig → town → default) for
+ // settings like auto_resolve_merge_conflicts.
+ const townConfig = await this.getTownConfig();
+
// Phase 0: Drain events and apply state transitions
try {
const pending = events.drainEvents(this.sql);
@@ -3633,7 +3797,7 @@ export class TownDO extends DurableObject {
}
for (const event of pending) {
try {
- reconciler.applyEvent(this.sql, event);
+ reconciler.applyEvent(this.sql, event, { townConfig });
events.markProcessed(this.sql, event.event_id);
} catch (err) {
logger.error('reconciler: applyEvent failed', {
@@ -3674,7 +3838,6 @@ export class TownDO extends DurableObject {
// Phase 1: Reconcile — compute desired state vs actual state
const sideEffects: Array<() => Promise> = [];
try {
- const townConfig = await this.getTownConfig();
const actions = reconciler.reconcile(this.sql, {
draining: this._draining,
townConfig,
@@ -4064,6 +4227,9 @@ export class TownDO extends DurableObject {
const systemPrompt = buildTriageSystemPrompt(pendingRequests);
// Only now create the synthetic bead — preconditions are verified.
+ // Set rig_id so that if Rule 3 resets this bead to 'open' after a
+ // dispatch timeout, Rule 1 of the reconciler can pick it up and
+ // re-dispatch it (with the correct triage system prompt via Option B).
const triageBead = beadOps.createBead(this.sql, {
type: 'issue',
title: `Triage batch: ${pendingCount} request(s)`,
@@ -4071,33 +4237,46 @@ export class TownDO extends DurableObject {
priority: 'high',
labels: [patrol.TRIAGE_BATCH_LABEL],
created_by: 'patrol',
+ rig_id: rigId,
});
const triageAgent = agents.getOrCreateAgent(this.sql, 'polecat', rigId, this.townId);
agents.hookBead(this.sql, triageAgent.id, triageBead.bead_id);
- const started = await dispatch.startAgentInContainer(this.env, this.ctx.storage, {
- townId: this.townId,
- rigId,
- userId: rigConfig.userId,
- agentId: triageAgent.id,
- agentName: triageAgent.name,
- role: 'polecat',
- identity: triageAgent.identity,
- beadId: triageBead.bead_id,
- beadTitle: triageBead.title,
- beadBody: triageBead.body ?? '',
- checkpoint: null,
- gitUrl: rigConfig.gitUrl,
- defaultBranch: rigConfig.defaultBranch,
- kilocodeToken,
- townConfig,
- systemPromptOverride: systemPrompt,
- platformIntegrationId: rigConfig.platformIntegrationId,
- lightweight: true,
- });
+ // Option A: Immediately mark the triage batch bead as in_progress so
+ // the reconciler's Rule 2 (idle agent + open hooked bead → dispatch_agent)
+ // does not re-fire on the next tick if the container start fails. Rule 3
+ // (stale in_progress bead + no working agent + 5-min timeout) will reset
+ // it back to open if the dispatch fails, allowing a clean retry via
+ // maybeDispatchTriageAgent with the correct triage system prompt.
+ beadOps.updateBeadStatus(this.sql, triageBead.bead_id, 'in_progress', triageAgent.id);
+
+ const { started: triageStarted } = await dispatch.startAgentInContainer(
+ this.env,
+ this.ctx.storage,
+ {
+ townId: this.townId,
+ rigId,
+ userId: rigConfig.userId,
+ agentId: triageAgent.id,
+ agentName: triageAgent.name,
+ role: 'polecat',
+ identity: triageAgent.identity,
+ beadId: triageBead.bead_id,
+ beadTitle: triageBead.title,
+ beadBody: triageBead.body ?? '',
+ checkpoint: null,
+ gitUrl: rigConfig.gitUrl,
+ defaultBranch: rigConfig.defaultBranch,
+ kilocodeToken,
+ townConfig,
+ systemPromptOverride: systemPrompt,
+ platformIntegrationId: rigConfig.platformIntegrationId,
+ lightweight: true,
+ }
+ );
- if (started) {
+ if (triageStarted) {
// Mark the agent as working so the duplicate-guard on the next
// alarm tick sees it and skips dispatch.
agents.updateAgentStatus(this.sql, triageAgent.id, 'working');
@@ -4278,12 +4457,55 @@ export class TownDO extends DurableObject {
headers['X-Drain-Nonce'] = this._drainNonce;
headers['X-Town-Id'] = townId;
}
- await container.fetch('http://container/health', {
- signal: AbortSignal.timeout(5_000),
- headers,
- });
+ const t0 = Date.now();
+ try {
+ const healthResp = await container.fetch('http://container/health', {
+ signal: AbortSignal.timeout(5_000),
+ headers,
+ });
+ const durationMs = Date.now() - t0;
+ if (!healthResp.ok) {
+ writeEvent(this.env, {
+ event: 'container.health_ping',
+ townId,
+ durationMs,
+ statusCode: healthResp.status,
+ error: `non-ok status ${healthResp.status}`,
+ });
+ } else {
+ writeEvent(this.env, {
+ event: 'container.health_ping',
+ townId,
+ durationMs,
+ statusCode: healthResp.status,
+ });
+ const rawBody: unknown = await healthResp.json().catch(() => null);
+ const HealthBody = z
+ .object({ startedAt: z.string().optional(), uptime: z.number().optional() })
+ .passthrough();
+ const body = HealthBody.safeParse(rawBody);
+ if (body.success && body.data.startedAt) {
+ const containerStartedAt = new Date(body.data.startedAt).getTime();
+ writeEvent(this.env, {
+ event: 'container.ready_observed',
+ townId,
+ containerStartedAt: body.data.startedAt,
+ durationMs: Date.now() - containerStartedAt,
+ });
+ }
+ }
+ } catch {
+ const durationMs = Date.now() - t0;
+ writeEvent(this.env, {
+ event: 'container.health_ping',
+ townId,
+ durationMs,
+ error: 'timeout',
+ });
+ // Container is starting up or unavailable — alarm will retry
+ }
} catch {
- // Container is starting up or unavailable — alarm will retry
+ // Outer try: buildContainerConfig or getTownContainerStub failed
}
}
@@ -4487,8 +4709,8 @@ export class TownDO extends DurableObject {
// Only count idle+hooked agents as orphaned if they've been idle for
// longer than the dispatch cooldown. Agents that were just hooked by
- // feedStrandedConvoys or restarted with backoff are legitimately
- // waiting for the next scheduler tick.
+ // the reconciler or restarted with backoff are legitimately waiting
+ // for the next scheduler tick.
const orphanedHooks = Number(
[
...query(
diff --git a/services/gastown/src/dos/town/actions.ts b/services/gastown/src/dos/town/actions.ts
index d7fdfee532..a31475fa4c 100644
--- a/services/gastown/src/dos/town/actions.ts
+++ b/services/gastown/src/dos/town/actions.ts
@@ -22,6 +22,7 @@ import * as reviewQueue from './review-queue';
import * as patrol from './patrol';
import { getRig } from './rigs';
import { parseGitUrl } from '../../util/platform-pr.util';
+import type { PRStatusResult } from './town-scm';
// ── Bead mutations ──────────────────────────────────────────────────
@@ -132,6 +133,12 @@ const CloseConvoy = z.object({
convoy_id: z.string(),
});
+const FailConvoy = z.object({
+ type: z.literal('fail_convoy'),
+ convoy_id: z.string(),
+ reason: z.string(),
+});
+
// ── Side effects (deferred) ─────────────────────────────────────────
const DispatchAgent = z.object({
@@ -206,6 +213,7 @@ export const Action = z.discriminatedUnion('type', [
UpdateConvoyProgress,
SetConvoyReadyToLand,
CloseConvoy,
+ FailConvoy,
// Side effects
DispatchAgent,
StopAgent,
@@ -239,6 +247,7 @@ export type DeleteAgent = z.infer;
export type UpdateConvoyProgress = z.infer;
export type SetConvoyReadyToLand = z.infer;
export type CloseConvoy = z.infer;
+export type FailConvoy = z.infer<typeof FailConvoy>;
export type DispatchAgent = z.infer;
export type StopAgent = z.infer;
export type PollPr = z.infer;
@@ -271,8 +280,8 @@ export type ApplyActionContext = {
dispatchAgent: (agentId: string, beadId: string, rigId: string) => Promise;
/** Stop an agent's container process. */
stopAgent: (agentId: string) => Promise;
- /** Check a PR's status via GitHub/GitLab API. Returns 'open'|'merged'|'closed'|null. */
- checkPRStatus: (prUrl: string) => Promise<'open' | 'merged' | 'closed' | null>;
+ /** Check a PR's status via GitHub/GitLab API. Returns PRStatusResult or null. */
+ checkPRStatus: (prUrl: string) => Promise<PRStatusResult | null>;
/** Check PR for unresolved review comments and failing CI checks. */
checkPRFeedback: (prUrl: string) => Promise;
/** Merge a PR via GitHub/GitLab API. */
@@ -397,7 +406,22 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
}
case 'create_landing_mr': {
- // Create an MR bead for the landing merge (feature branch → main)
+ const timestamp = now();
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.metadata} = json_set(
+ COALESCE(${beads.columns.metadata}, '{}'),
+ '$.landing_mr_attempts',
+ COALESCE(json_extract(${beads.columns.metadata}, '$.landing_mr_attempts'), 0) + 1,
+ '$.last_landing_mr_attempt_at', ?
+ ),
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.bead_id} = ?
+ `,
+ [timestamp, timestamp, action.convoy_id]
+ );
reviewQueue.submitToReviewQueue(sql, {
agent_id: 'system',
bead_id: action.convoy_id,
@@ -592,7 +616,6 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
}
case 'close_convoy': {
- // Use updateBeadStatus for terminal state guard + bead event logging
beadOps.updateBeadStatus(sql, action.convoy_id, 'closed', 'system');
query(
sql,
@@ -606,6 +629,25 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
return null;
}
+ case 'fail_convoy': {
+ beadOps.updateBeadStatus(sql, action.convoy_id, 'failed', 'system');
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.metadata} = json_set(
+ COALESCE(${beads.columns.metadata}, '{}'),
+ '$.failureReason', 'landing_mr_exhausted',
+ '$.failureMessage', ?
+ ),
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.bead_id} = ?
+ `,
+ [action.reason, now(), action.convoy_id]
+ );
+ return null;
+ }
+
// ── Side effects (deferred) ─────────────────────────────────
case 'dispatch_agent': {
@@ -683,8 +725,8 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
return async () => {
try {
- const status = await ctx.checkPRStatus(action.pr_url);
- if (status !== null) {
+ const prStatusResult = await ctx.checkPRStatus(action.pr_url);
+ if (prStatusResult !== null) {
// Any non-null result resets the consecutive null counter
query(
sql,
@@ -698,6 +740,7 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
`,
[action.bead_id]
);
+ const { status, mergeable_state } = prStatusResult;
if (status !== 'open') {
ctx.insertEvent('pr_status_changed', {
bead_id: action.bead_id,
@@ -711,6 +754,125 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
const refineryConfig = townConfig.refinery;
if (!refineryConfig) return;
+ if (mergeable_state === 'unknown') {
+ // GitHub is still computing mergeability — skip this poll and
+ // check again on the next tick. Do NOT treat 'unknown' as clean
+ // or dirty to avoid prematurely clearing has_conflicts or
+ // emitting pr_conflict_detected before GitHub has a definitive answer.
+ return;
+ }
+
+ if (mergeable_state === 'dirty') {
+ // PR has merge conflicts — emit event ONCE per conflict episode.
+ // The reconciler decides whether to create a conflict bead or an escalation
+ // based on the rig's auto_resolve_merge_conflicts config.
+ const conflictMetaRows = z
+ .object({ has_conflicts: z.unknown() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT json_extract(${beads.columns.metadata}, '$.has_conflicts') AS has_conflicts
+ FROM ${beads}
+ WHERE ${beads.bead_id} = ?
+ `,
+ [action.bead_id]
+ ),
+ ]);
+ const alreadyMarked =
+ conflictMetaRows[0]?.has_conflicts === 1 ||
+ conflictMetaRows[0]?.has_conflicts === true;
+
+ if (!alreadyMarked) {
+ // Mark conflict on MR bead metadata
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.metadata} = json_set(
+ COALESCE(${beads.columns.metadata}, '{}'),
+ '$.has_conflicts', 1,
+ '$.conflicts_detected_at', ?
+ ),
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.bead_id} = ?
+ `,
+ [now(), now(), action.bead_id]
+ );
+
+ // Get MR bead source bead ID and branch for the event payload
+ const mrMetaRows = z
+ .object({ source_bead_id: z.string().nullable(), branch: z.string().nullable() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT
+ json_extract(${beads.columns.metadata}, '$.source_bead_id') AS source_bead_id,
+ ${review_metadata.columns.branch} AS branch
+ FROM ${beads}
+ INNER JOIN ${review_metadata} ON ${review_metadata.bead_id} = ${beads.bead_id}
+ WHERE ${beads.bead_id} = ?
+ `,
+ [action.bead_id]
+ ),
+ ]);
+ const sourceBead = mrMetaRows[0]?.source_bead_id ?? null;
+ const conflictBranch = mrMetaRows[0]?.branch ?? '';
+
+ ctx.insertEvent('pr_conflict_detected', {
+ bead_id: action.bead_id,
+ payload: {
+ mr_bead_id: action.bead_id,
+ source_bead_id: sourceBead,
+ pr_url: action.pr_url,
+ branch: conflictBranch,
+ },
+ });
+ }
+
+ // A dirty PR must not proceed to the auto-merge timer — reset the
+ // grace-period clock so the timer starts fresh once conflicts are resolved.
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${review_metadata}
+ SET ${review_metadata.columns.auto_merge_ready_since} = NULL
+ WHERE ${review_metadata.bead_id} = ?
+ AND ${review_metadata.columns.auto_merge_ready_since} IS NOT NULL
+ `,
+ [action.bead_id]
+ );
+ return;
+ } else if (
+ mergeable_state === 'clean' ||
+ mergeable_state === 'blocked' ||
+ mergeable_state === 'has_hooks'
+ ) {
+ // Conflict definitively resolved — clear the has_conflicts flag.
+ // 'clean': no conflicts, all checks pass.
+ // 'blocked': no conflicts but checks are failing (e.g. required reviews).
+ // 'has_hooks': no conflicts but pre-receive hooks are pending.
+ // 'unknown' is handled above (GitHub still computing — retry next poll).
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.metadata} = json_remove(
+ COALESCE(${beads.columns.metadata}, '{}'),
+ '$.has_conflicts',
+ '$.conflicts_detected_at'
+ ),
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.bead_id} = ?
+ AND json_extract(${beads.columns.metadata}, '$.has_conflicts') IS NOT NULL
+ `,
+ [now(), action.bead_id]
+ );
+ }
+
const wantsAutoResolve = refineryConfig.auto_resolve_pr_feedback === true;
const wantsAutoMerge =
refineryConfig.auto_merge !== false &&
@@ -736,10 +898,10 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro
// If the PR was merged externally during that window, inserting
// pr_feedback_detected would create a feedback bead for a merged
// PR — leading to a duplicate PR on an already-merged branch.
- const freshStatus = await ctx.checkPRStatus(action.pr_url);
- if (freshStatus !== 'open') {
+ const freshStatusResult = await ctx.checkPRStatus(action.pr_url);
+ if (freshStatusResult?.status !== 'open') {
console.log(
- `${LOG} poll_pr: PR status changed to '${freshStatus}' during feedback check, skipping feedback for bead=${action.bead_id}`
+ `${LOG} poll_pr: PR status changed to '${freshStatusResult?.status ?? 'null'}' during feedback check, skipping feedback for bead=${action.bead_id}`
);
} else {
const existingFeedback = hasExistingFeedbackBead(sql, action.bead_id);
diff --git a/services/gastown/src/dos/town/agents.ts b/services/gastown/src/dos/town/agents.ts
index 48d963f205..cbcefc468f 100644
--- a/services/gastown/src/dos/town/agents.ts
+++ b/services/gastown/src/dos/town/agents.ts
@@ -202,7 +202,21 @@ export function updateAgentStatus(sql: SqlStorage, agentId: string, status: stri
}
export function deleteAgent(sql: SqlStorage, agentId: string): void {
- // Unassign beads that reference this agent
+ // Clear assignee on terminal beads (closed/failed) without reopening them.
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.assignee_agent_bead_id} = NULL,
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.assignee_agent_bead_id} = ?
+ AND ${beads.columns.status} IN ('closed', 'failed')
+ `,
+ [now(), agentId]
+ );
+
+ // Reopen non-terminal beads assigned to this agent so the reconciler
+ // can re-dispatch them.
query(
sql,
/* sql */ `
@@ -211,6 +225,7 @@ export function deleteAgent(sql: SqlStorage, agentId: string): void {
${beads.columns.status} = 'open',
${beads.columns.updated_at} = ?
WHERE ${beads.assignee_agent_bead_id} = ?
+ AND ${beads.columns.status} NOT IN ('closed', 'failed')
`,
[now(), agentId]
);
@@ -504,6 +519,33 @@ export function prime(sql: SqlStorage, agentId: string): PrimeContext {
};
}
+ // Build PR conflict context if the hooked bead is a PR conflict resolution request,
+ // or if it is a PR feedback bead that has also accumulated merge conflicts.
+ let pr_conflict_context: PrimeContext['pr_conflict_context'] = null;
+ if (hookedBead?.labels.includes('gt:pr-conflict') && hookedBead.metadata) {
+ const meta = hookedBead.metadata as Record;
+ pr_conflict_context = {
+ pr_url: typeof meta.pr_url === 'string' ? meta.pr_url : null,
+ branch: typeof meta.branch === 'string' ? meta.branch : null,
+ target_branch: typeof meta.target_branch === 'string' ? meta.target_branch : null,
+ has_feedback: meta.has_feedback === true || meta.has_feedback === 1,
+ };
+ } else if (hookedBead?.labels.includes('gt:pr-feedback') && hookedBead.metadata) {
+ // A feedback bead can also have has_conflicts: true when a conflict was detected
+ // after the feedback bead was already created. Surface the conflict context so the
+ // agent resolves conflicts first, then addresses review feedback.
+ const meta = hookedBead.metadata as Record<string, unknown>;
+ if (meta.has_conflicts === true || meta.has_conflicts === 1) {
+ pr_conflict_context = {
+ pr_url: typeof meta.pr_url === 'string' ? meta.pr_url : null,
+ branch: typeof meta.branch === 'string' ? meta.branch : null,
+ target_branch:
+ typeof meta.conflict_target_branch === 'string' ? meta.conflict_target_branch : null,
+ has_feedback: true,
+ };
+ }
+ }
+
return {
agent,
hooked_bead: hookedBead,
@@ -511,6 +553,7 @@ export function prime(sql: SqlStorage, agentId: string): PrimeContext {
open_beads: openBeads,
rework_context,
pr_fixup_context,
+ pr_conflict_context,
};
}
diff --git a/services/gastown/src/dos/town/beads.ts b/services/gastown/src/dos/town/beads.ts
index 25d76d8a3b..bb4bc73c15 100644
--- a/services/gastown/src/dos/town/beads.ts
+++ b/services/gastown/src/dos/town/beads.ts
@@ -421,8 +421,8 @@ export function updateConvoyProgress(sql: SqlStorage, beadId: string, timestamp:
if (featureBranch && mergeMode === 'review-then-land') {
// Mark the convoy as ready to land by storing a flag in metadata.
- // The alarm loop's processReviewQueue will detect this and create
- // the final landing MR (feature branch → main).
+ // The reconciler will detect this and create the final landing
+ // MR (feature branch → main).
query(
sql,
/* sql */ `
@@ -530,6 +530,178 @@ export function insertDependency(
);
}
+/**
+ * Atomically replace all 'blocks' edges for a bead.
+ * Deletes existing blockers then inserts the provided list.
+ * Self-loops are silently skipped.
+ */
+export function setDependencies(sql: SqlStorage, beadId: string, dependsOnBeadIds: string[]): void {
+ query(
+ sql,
+ /* sql */ `
+ DELETE FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'blocks'
+ `,
+ [beadId]
+ );
+ for (const depId of dependsOnBeadIds) {
+ if (depId === beadId) continue; // no self-loops
+ query(
+ sql,
+ /* sql */ `
+ INSERT OR IGNORE INTO ${bead_dependencies} (
+ ${bead_dependencies.columns.bead_id},
+ ${bead_dependencies.columns.depends_on_bead_id},
+ ${bead_dependencies.columns.dependency_type}
+ ) VALUES (?, ?, 'blocks')
+ `,
+ [beadId, depId]
+ );
+ }
+}
+
+/**
+ * Add a bead to a convoy's tracking.
+ * Inserts a 'tracks' edge and increments total_beads — but only when the
+ * edge is genuinely new (we pre-check for an existing 'tracks' edge and
+ * return false, leaving the counter untouched, if one already exists).
+ * Returns whether the bead was newly added (true) or already tracked (false).
+ */
+export function convoyAddBead(sql: SqlStorage, convoyId: string, beadId: string): boolean {
+ // Check if already tracked
+ const existing = [
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT 1 FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.bead_id} = ?
+ AND ${bead_dependencies.depends_on_bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'tracks'
+ `,
+ [beadId, convoyId]
+ ),
+ ];
+ if (existing.length > 0) return false;
+
+ query(
+ sql,
+ /* sql */ `
+ INSERT INTO ${bead_dependencies} (
+ ${bead_dependencies.columns.bead_id},
+ ${bead_dependencies.columns.depends_on_bead_id},
+ ${bead_dependencies.columns.dependency_type}
+ ) VALUES (?, ?, 'tracks')
+ `,
+ [beadId, convoyId]
+ );
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${convoy_metadata}
+ SET ${convoy_metadata.columns.total_beads} = ${convoy_metadata.columns.total_beads} + 1
+ WHERE ${convoy_metadata.bead_id} = ?
+ `,
+ [convoyId]
+ );
+ return true;
+}
+
+/**
+ * Remove a bead from a convoy's tracking.
+ * Deletes the 'tracks' edge, decrements total_beads (floor 0), and removes
+ * any 'blocks' edges between this bead and other beads in the same convoy.
+ * Returns whether the bead was actually tracked (true) or not found (false).
+ */
+export function convoyRemoveBead(sql: SqlStorage, convoyId: string, beadId: string): boolean {
+ // Check if tracked
+ const existing = [
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT 1 FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.bead_id} = ?
+ AND ${bead_dependencies.depends_on_bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'tracks'
+ `,
+ [beadId, convoyId]
+ ),
+ ];
+ if (existing.length === 0) return false;
+
+ // Remove the tracks edge
+ query(
+ sql,
+ /* sql */ `
+ DELETE FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.bead_id} = ?
+ AND ${bead_dependencies.depends_on_bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'tracks'
+ `,
+ [beadId, convoyId]
+ );
+ // Decrement total_beads, floor at 0
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${convoy_metadata}
+ SET ${convoy_metadata.columns.total_beads} = MAX(0, ${convoy_metadata.columns.total_beads} - 1)
+ WHERE ${convoy_metadata.bead_id} = ?
+ `,
+ [convoyId]
+ );
+
+ // Find all other beads tracked by this convoy
+ const siblingRows = [
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT ${bead_dependencies.bead_id}
+ FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.depends_on_bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'tracks'
+ `,
+ [convoyId]
+ ),
+ ];
+ const siblingIds = z
+ .object({ bead_id: z.string() })
+ .array()
+ .parse(siblingRows)
+ .map(r => r.bead_id);
+
+ if (siblingIds.length > 0) {
+ // Delete 'blocks' edges where beadId is the blocker of a sibling
+ for (const siblingId of siblingIds) {
+ query(
+ sql,
+ /* sql */ `
+ DELETE FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.bead_id} = ?
+ AND ${bead_dependencies.depends_on_bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'blocks'
+ `,
+ [siblingId, beadId]
+ );
+ }
+ // Delete 'blocks' edges where beadId depends on a sibling
+ for (const siblingId of siblingIds) {
+ query(
+ sql,
+ /* sql */ `
+ DELETE FROM ${bead_dependencies}
+ WHERE ${bead_dependencies.bead_id} = ?
+ AND ${bead_dependencies.depends_on_bead_id} = ?
+ AND ${bead_dependencies.dependency_type} = 'blocks'
+ `,
+ [beadId, siblingId]
+ );
+ }
+ }
+
+ return true;
+}
+
/**
* Find beads that were blocked by `closedBeadId` and are now fully unblocked
* (all their 'blocks' dependencies are resolved).
@@ -715,6 +887,131 @@ export function deleteBead(sql: SqlStorage, beadId: string): void {
query(sql, /* sql */ `DELETE FROM ${beads} WHERE ${beads.bead_id} = ?`, [beadId]);
}
+export function deleteBeads(sql: SqlStorage, beadIds: string[]): number {
+ if (beadIds.length === 0) return 0;
+
+ const allIds = new Set(beadIds);
+
+ // Expand with child beads (molecule steps, etc.)
+ // Dynamic IN clauses use sql.exec directly — the type-safe query()
+ // wrapper can't infer placeholder count from runtime-built strings.
+ const ph = (ids: string[]) => ids.map(() => '?').join(',');
+ const childRows = [
+ ...sql.exec(
+ /* sql */ `SELECT ${beads.bead_id} FROM ${beads} WHERE ${beads.parent_bead_id} IN (${ph(beadIds)})`,
+ ...beadIds
+ ),
+ ];
+ const childIds = BeadRecord.pick({ bead_id: true })
+ .array()
+ .parse(childRows)
+ .map(r => r.bead_id);
+
+ // Recursively collect children of children
+ if (childIds.length > 0) {
+ for (const childId of childIds) {
+ allIds.add(childId);
+ }
+ // Recurse for deeper nesting
+ const deeperIds = collectChildBeadIds(sql, childIds);
+ for (const id of deeperIds) {
+ allIds.add(id);
+ }
+ }
+
+ const allIdsArr = [...allIds];
+ const placeholders = ph(allIdsArr);
+
+ // Unhook agents assigned to any of these beads
+ sql.exec(
+ /* sql */ `UPDATE ${agent_metadata}
+ SET ${agent_metadata.columns.current_hook_bead_id} = NULL,
+ ${agent_metadata.columns.status} = 'idle'
+ WHERE ${agent_metadata.current_hook_bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+
+ // Delete dependencies referencing any of these beads
+ sql.exec(
+ /* sql */ `DELETE FROM ${bead_dependencies} WHERE ${bead_dependencies.bead_id} IN (${placeholders}) OR ${bead_dependencies.depends_on_bead_id} IN (${placeholders})`,
+ ...allIdsArr,
+ ...allIdsArr
+ );
+
+ // Delete events
+ sql.exec(
+ /* sql */ `DELETE FROM ${bead_events} WHERE ${bead_events.bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+
+ // Delete satellite metadata
+ sql.exec(
+ /* sql */ `DELETE FROM ${agent_metadata} WHERE ${agent_metadata.bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+ sql.exec(
+ /* sql */ `DELETE FROM ${review_metadata} WHERE ${review_metadata.bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+ sql.exec(
+ /* sql */ `DELETE FROM ${escalation_metadata} WHERE ${escalation_metadata.bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+ sql.exec(
+ /* sql */ `DELETE FROM ${convoy_metadata} WHERE ${convoy_metadata.bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+
+ // Delete the beads themselves
+ sql.exec(
+ /* sql */ `DELETE FROM ${beads} WHERE ${beads.bead_id} IN (${placeholders})`,
+ ...allIdsArr
+ );
+
+ return allIdsArr.length;
+}
+
+function collectChildBeadIds(sql: SqlStorage, parentIds: string[]): string[] {
+ if (parentIds.length === 0) return [];
+ const childRows = [
+ ...sql.exec(
+ /* sql */ `SELECT ${beads.bead_id} FROM ${beads} WHERE ${beads.parent_bead_id} IN (${parentIds.map(() => '?').join(',')})`,
+ ...parentIds
+ ),
+ ];
+ const childIds = BeadRecord.pick({ bead_id: true })
+ .array()
+ .parse(childRows)
+ .map(r => r.bead_id);
+ if (childIds.length === 0) return [];
+ const deeperIds = collectChildBeadIds(sql, childIds);
+ return [...childIds, ...deeperIds];
+}
+
+export function deleteBeadsByStatus(sql: SqlStorage, status: BeadStatus, type?: BeadType): number {
+ const conditions: string[] = [`${beads.status} = ?`];
+ const values: unknown[] = [status];
+
+ if (type) {
+ conditions.push(`${beads.type} = ?`);
+ values.push(type);
+ }
+
+ const rows = [
+ ...sql.exec(
+ /* sql */ `SELECT ${beads.bead_id} FROM ${beads} WHERE ${conditions.join(' AND ')}`,
+ ...values
+ ),
+ ];
+ const beadIds = BeadRecord.pick({ bead_id: true })
+ .array()
+ .parse(rows)
+ .map(r => r.bead_id);
+
+ if (beadIds.length === 0) return 0;
+ return deleteBeads(sql, beadIds);
+}
+
// ── Bead Events ─────────────────────────────────────────────────────
export function logBeadEvent(
diff --git a/services/gastown/src/dos/town/config.ts b/services/gastown/src/dos/town/config.ts
index 156211115a..403d77fd75 100644
--- a/services/gastown/src/dos/town/config.ts
+++ b/services/gastown/src/dos/town/config.ts
@@ -89,6 +89,10 @@ export async function updateTownConfig(
update.refinery.auto_resolve_pr_feedback ??
current.refinery?.auto_resolve_pr_feedback ??
false,
+ auto_resolve_merge_conflicts:
+ update.refinery.auto_resolve_merge_conflicts ??
+ current.refinery?.auto_resolve_merge_conflicts ??
+ true,
auto_merge_delay_minutes:
update.refinery.auto_merge_delay_minutes !== undefined
? update.refinery.auto_merge_delay_minutes
@@ -191,6 +195,7 @@ export type EffectiveConfig = {
review_mode: 'rework' | 'comments';
code_review: boolean;
auto_resolve_pr_feedback: boolean;
+ auto_resolve_merge_conflicts: boolean;
auto_merge_delay_minutes: number | null;
merge_strategy: MergeStrategy;
convoy_merge_mode: 'review-then-land' | 'review-and-merge';
@@ -227,6 +232,10 @@ export function resolveRigConfig(
rigOverride?.auto_resolve_pr_feedback ??
townConfig.refinery?.auto_resolve_pr_feedback ??
false,
+ auto_resolve_merge_conflicts:
+ rigOverride?.auto_resolve_merge_conflicts ??
+ townConfig.refinery?.auto_resolve_merge_conflicts ??
+ true,
auto_merge_delay_minutes:
rigOverride?.auto_merge_delay_minutes !== undefined
? rigOverride.auto_merge_delay_minutes
diff --git a/services/gastown/src/dos/town/container-dispatch.ts b/services/gastown/src/dos/town/container-dispatch.ts
index e113bc4174..e2559f5d27 100644
--- a/services/gastown/src/dos/town/container-dispatch.ts
+++ b/services/gastown/src/dos/town/container-dispatch.ts
@@ -9,6 +9,7 @@ import { buildPolecatSystemPrompt } from '../../prompts/polecat-system.prompt';
import { buildMayorSystemPrompt } from '../../prompts/mayor-system.prompt';
import type { TownConfig, RigOverrideConfig } from '../../types';
import { buildContainerConfig, resolveModel, resolveSmallModel, resolveRigConfig } from './config';
+import { writeEvent } from '../../util/analytics.util';
const TOWN_LOG = '[Town.do]';
@@ -378,7 +379,7 @@ export async function startAgentInContainer(
platformIntegrationId?: string;
}>;
}
-): Promise<boolean> {
+): Promise<{ started: boolean; containerFetchMs: number }> {
lastStartError = null;
console.log(
`${TOWN_LOG} startAgentInContainer: agentId=${params.agentId} role=${params.role} name=${params.agentName}`
@@ -402,7 +403,7 @@ export async function startAgentInContainer(
`${TOWN_LOG} startAgentInContainer: ABORTING — failed to mint any auth token for agent ${params.agentId}. ` +
'The agent would start without credentials and be unable to call back to the worker.'
);
- return false;
+ return { started: false, containerFetchMs: 0 };
}
// Build env vars from town config
@@ -454,6 +455,7 @@ export async function startAgentInContainer(
const rigOverride = params.rigOverride ?? null;
const effectiveConfig = resolveRigConfig(params.townConfig, rigOverride);
+ const fetchStart = Date.now();
const response = await container.fetch('http://container/agents/start', {
method: 'POST',
signal: AbortSignal.timeout(60_000),
@@ -519,6 +521,7 @@ export async function startAgentInContainer(
}),
});
+ const durationMs = Date.now() - fetchStart;
if (!response.ok) {
const text = await response.text().catch(() => '(unreadable)');
// "Already running" means a previous dispatch succeeded — the agent
@@ -528,7 +531,15 @@ export async function startAgentInContainer(
console.log(
`${TOWN_LOG} startAgentInContainer: agent ${params.agentId} already running — treating as success`
);
- return true;
+ writeEvent(env, {
+ event: 'container.agent_start_fetch',
+ townId: params.townId,
+ rigId: params.rigId,
+ agentId: params.agentId,
+ durationMs,
+ statusCode: response.status,
+ });
+ return { started: true, containerFetchMs: durationMs };
}
const errorMsg = `(${response.status}) ${text.slice(0, 300)}`;
console.error(
@@ -536,13 +547,31 @@ export async function startAgentInContainer(
`agent=${params.agentId} role=${params.role}: ${errorMsg}`
);
lastStartError = errorMsg;
+ writeEvent(env, {
+ event: 'container.agent_start_fetch',
+ townId: params.townId,
+ rigId: params.rigId,
+ agentId: params.agentId,
+ durationMs,
+ statusCode: response.status,
+ error: errorMsg,
+ });
+ return { started: false, containerFetchMs: durationMs };
}
- return response.ok;
+ writeEvent(env, {
+ event: 'container.agent_start_fetch',
+ townId: params.townId,
+ rigId: params.rigId,
+ agentId: params.agentId,
+ durationMs,
+ statusCode: response.status,
+ });
+ return { started: true, containerFetchMs: durationMs };
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
console.error(`${TOWN_LOG} startAgentInContainer: EXCEPTION for agent ${params.agentId}:`, err);
lastStartError = `EXCEPTION: ${message.slice(0, 300)}`;
- return false;
+ return { started: false, containerFetchMs: 0 };
}
}
@@ -647,12 +676,12 @@ export async function checkAgentContainerStatus(
signal: AbortSignal.timeout(10_000),
});
// 404 means the container is running but has no record of this agent
- // (e.g. after container eviction). Report as 'not_found' so
- // witnessPatrol can immediately reset and redispatch the agent
+ // (e.g. after container eviction). Report as 'not_found' so the
+ // reconciler can immediately reset and redispatch the agent
// instead of waiting for the 2-hour GUPP timeout.
if (response.status === 404) return { status: 'not_found' };
// Non-OK but not 404 — container is having issues but may still
- // have the agent running. Return 'unknown' so witnessPatrol doesn't
+ // have the agent running. Return 'unknown' so the reconciler doesn't
// falsely reset a working agent.
if (!response.ok) return { status: 'unknown' };
const data: unknown = await response.json();
@@ -668,7 +697,7 @@ export async function checkAgentContainerStatus(
return { status: 'unknown' };
} catch {
// Timeout, network error, or container starting up — return
- // 'unknown' so witnessPatrol doesn't falsely reset working agents.
+ // 'unknown' so the reconciler doesn't falsely reset working agents.
// True zombies will be caught after repeated 'unknown' results
// once the GIPP/heartbeat timeout expires.
return { status: 'unknown' };
diff --git a/services/gastown/src/dos/town/patrol.ts b/services/gastown/src/dos/town/patrol.ts
index b1617599a1..2172623233 100644
--- a/services/gastown/src/dos/town/patrol.ts
+++ b/services/gastown/src/dos/town/patrol.ts
@@ -17,8 +17,6 @@ const LOG = '[patrol]';
// ── Thresholds ──────────────────────────────────────────────────────
-/** First GUPP warning (existing behavior) */
-export const GUPP_WARN_MS = 30 * 60_000; // 30 min
/** Escalate to mayor after second threshold */
export const GUPP_ESCALATE_MS = 60 * 60_000; // 1h
/** Force-stop agent after third threshold */
diff --git a/services/gastown/src/dos/town/reconciler.ts b/services/gastown/src/dos/town/reconciler.ts
index a7bfcc9acb..2422477e5f 100644
--- a/services/gastown/src/dos/town/reconciler.ts
+++ b/services/gastown/src/dos/town/reconciler.ts
@@ -18,12 +18,14 @@ import { review_metadata, ReviewMetadataRecord } from '../../db/tables/review-me
import { convoy_metadata, ConvoyMetadataRecord } from '../../db/tables/convoy-metadata.table';
import { bead_dependencies } from '../../db/tables/bead-dependencies.table';
import { agent_nudges } from '../../db/tables/agent-nudges.table';
+import { escalation_metadata } from '../../db/tables/escalation-metadata.table';
import { query } from '../../util/query.util';
import {
GUPP_ESCALATE_MS,
GUPP_FORCE_STOP_MS,
AGENT_GC_RETENTION_MS,
TRIAGE_LABEL_LIKE,
+ createTriageRequest,
} from './patrol';
import { MAX_DISPATCH_ATTEMPTS } from './scheduling';
import * as reviewQueue from './review-queue';
@@ -45,6 +47,15 @@ const CIRCUIT_BREAKER_FAILURE_THRESHOLD = 20;
/** Window in minutes for counting dispatch failures. */
const CIRCUIT_BREAKER_WINDOW_MINUTES = 30;
+/** Max landing MR creation attempts before failing the convoy (#2260). */
+const MAX_LANDING_MR_ATTEMPTS = 5;
+
+/** Base cooldown for landing MR retry: min(2^attempts * BASE, MAX) (#2260). */
+const LANDING_MR_COOLDOWN_BASE_MS = 30_000; // 30s
+
+/** Max cooldown for landing MR retry (#2260). */
+const LANDING_MR_COOLDOWN_MAX_MS = 30 * 60_000; // 30 min
+
/**
* Town-level dispatch circuit breaker. Counts beads with at least one
* dispatch attempt in the recent window that have not yet closed
@@ -202,7 +213,11 @@ type ConvoyRow = z.infer<typeof ConvoyRow>;
*
* See reconciliation-spec.md §5.2.
*/
-export function applyEvent(sql: SqlStorage, event: TownEventRecord): void {
+export function applyEvent(
+ sql: SqlStorage,
+ event: TownEventRecord,
+ opts?: { townConfig?: TownConfig }
+): void {
const payload = event.payload;
switch (event.event_type) {
@@ -394,6 +409,27 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void {
const hasFailingChecks = payload.has_failing_checks === true;
const hasUncheckedRuns = payload.has_unchecked_runs === true;
+ // Consolidation: if there's already an open gt:pr-conflict bead for this MR,
+ // add has_feedback: true to it instead of creating a separate feedback bead.
+ // The agent resolving conflicts will then also address review feedback afterward.
+ const existingConflictBeadId = getExistingPrConflictBeadId(sql, mrBeadId);
+ if (existingConflictBeadId) {
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.metadata} = json_set(COALESCE(${beads.metadata}, '{}'), '$.has_feedback', 1),
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.bead_id} = ?
+ `,
+ [new Date().toISOString(), existingConflictBeadId]
+ );
+ console.log(
+ `${LOG} pr_feedback_detected: merged into existing conflict bead ${existingConflictBeadId} (mrBeadId=${mrBeadId})`
+ );
+ return;
+ }
+
const feedbackBead = beadOps.createBead(sql, {
type: 'issue',
title: buildFeedbackBeadTitle(
@@ -426,6 +462,143 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void {
return;
}
+ case 'pr_conflict_detected': {
+ const mrBeadId = typeof payload.mr_bead_id === 'string' ? payload.mr_bead_id : null;
+ if (!mrBeadId) {
+ console.warn(`${LOG} applyEvent: pr_conflict_detected missing mr_bead_id`);
+ return;
+ }
+
+ const mrBead = beadOps.getBead(sql, mrBeadId);
+ if (!mrBead || mrBead.status === 'closed' || mrBead.status === 'failed') return;
+
+ // Idempotent: check for an existing open gt:pr-conflict bead blocking this MR
+ if (hasExistingPrConflictBead(sql, mrBeadId)) return;
+
+ const prUrl = typeof payload.pr_url === 'string' ? payload.pr_url : '';
+ const branch = typeof payload.branch === 'string' ? payload.branch : '';
+ const sourceBead = typeof payload.source_bead_id === 'string' ? payload.source_bead_id : null;
+
+ // Read the target_branch from review_metadata
+ const rmRows = z
+ .object({ target_branch: z.string() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT ${review_metadata.columns.target_branch}
+ FROM ${review_metadata}
+ WHERE ${review_metadata.bead_id} = ?
+ `,
+ [mrBeadId]
+ ),
+ ]);
+ const targetBranch = rmRows[0]?.target_branch ?? '';
+
+ // Read auto_resolve_merge_conflicts using the same fallback chain as
+ // auto_resolve_pr_feedback: rig override → town config → default (true).
+ const rig = mrBead.rig_id ? getRig(sql, mrBead.rig_id) : null;
+ const effectiveConfig = opts?.townConfig
+ ? resolveRigConfig(opts.townConfig, rig?.config ?? null)
+ : { auto_resolve_merge_conflicts: rig?.config?.auto_resolve_merge_conflicts !== false };
+ const autoResolveConflicts = effectiveConfig.auto_resolve_merge_conflicts !== false;
+
+ if (autoResolveConflicts) {
+ // Consolidation: if there's already an open gt:pr-feedback bead for this MR,
+ // add has_conflicts: true to it instead of creating a separate conflict bead.
+ // The agent handling the feedback bead will resolve conflicts first, then
+ // address review comments.
+ const existingFeedbackBeadId = getExistingPrFeedbackBeadId(sql, mrBeadId);
+ if (existingFeedbackBeadId) {
+ query(
+ sql,
+ /* sql */ `
+ UPDATE ${beads}
+ SET ${beads.columns.metadata} = json_set(COALESCE(${beads.metadata}, '{}'), '$.has_conflicts', 1, '$.conflict_target_branch', ?),
+ ${beads.columns.updated_at} = ?
+ WHERE ${beads.bead_id} = ?
+ `,
+ [targetBranch, new Date().toISOString(), existingFeedbackBeadId]
+ );
+ console.log(
+ `${LOG} pr_conflict_detected: merged into existing feedback bead ${existingFeedbackBeadId} (mrBeadId=${mrBeadId})`
+ );
+ return;
+ }
+
+ const conflictBead = beadOps.createBead(sql, {
+ type: 'issue',
+ title: `Resolve merge conflicts on PR: ${branch}`,
+ body: buildConflictResolutionPrompt(prUrl, branch, targetBranch),
+ rig_id: mrBead.rig_id ?? undefined,
+ parent_bead_id: mrBeadId,
+ labels: ['gt:pr-conflict'],
+ metadata: {
+ pr_url: prUrl,
+ branch,
+ target_branch: targetBranch,
+ mr_bead_id: mrBeadId,
+ source_bead_id: sourceBead,
+ },
+ });
+
+ // Conflict bead blocks the MR bead (same pattern as feedback beads)
+ beadOps.insertDependency(sql, mrBeadId, conflictBead.bead_id, 'blocks');
+ } else {
+ // auto_resolve_merge_conflicts disabled — route through the full
+ // escalation pipeline so escalation_metadata, triage request, and
+ // mayor notification are all created (same path as routeEscalation()).
+ const escalationBead = beadOps.createBead(sql, {
+ type: 'escalation',
+ title: `Merge conflict detected: ${branch}`,
+ body: `PR ${prUrl} (branch ${branch}) has merge conflicts that require manual resolution.`,
+ priority: 'high',
+ rig_id: mrBead.rig_id ?? undefined,
+ labels: ['gt:escalation', 'severity:high'],
+ metadata: {
+ pr_url: prUrl,
+ branch,
+ target_branch: targetBranch,
+ mr_bead_id: mrBeadId,
+ source_bead_id: sourceBead,
+ conflict: true,
+ },
+ });
+ query(
+ sql,
+ /* sql */ `
+ INSERT INTO ${escalation_metadata} (
+ ${escalation_metadata.columns.bead_id},
+ ${escalation_metadata.columns.severity},
+ ${escalation_metadata.columns.category},
+ ${escalation_metadata.columns.acknowledged},
+ ${escalation_metadata.columns.re_escalation_count},
+ ${escalation_metadata.columns.acknowledged_at}
+ ) VALUES (?, ?, ?, ?, ?, ?)
+ `,
+ [escalationBead.bead_id, 'high', 'merge_conflict', 0, 0, null]
+ );
+ createTriageRequest(sql, {
+ triageType: 'escalation',
+ agentBeadId: null,
+ title: `Escalation (high): Merge conflict on ${branch}`,
+ context: {
+ escalation_bead_id: escalationBead.bead_id,
+ severity: 'high',
+ rig_id: mrBead.rig_id,
+ category: 'merge_conflict',
+ pr_url: prUrl,
+ branch,
+ mr_bead_id: mrBeadId,
+ },
+ options: ['ESCALATE_TO_MAYOR', 'RESTART', 'CLOSE_BEAD', 'REASSIGN_BEAD'],
+ rigId: mrBead.rig_id ?? undefined,
+ });
+ }
+ return;
+ }
+
case 'pr_auto_merge': {
const mrBeadId = typeof payload.mr_bead_id === 'string' ? payload.mr_bead_id : null;
if (!mrBeadId) {
@@ -539,7 +712,7 @@ export function reconcileAgents(sql: SqlStorage, opts?: { draining?: boolean }):
// Agent is working with fresh heartbeat but no hook — it's running
// in the container but has no bead to work on (gt_done already ran,
// or the hook was cleared by another code path). Set to idle so
- // processReviewQueue / schedulePendingWork can use it.
+ // the reconciler can dispatch it to new work.
actions.push({
type: 'transition_agent',
agent_id: agent.bead_id,
@@ -810,7 +983,7 @@ export function reconcileBeads(
});
}
- // Rule 2: Idle agents with hooks need dispatch (schedulePendingWork equivalent)
+ // Rule 2: Idle agents with hooks need dispatch
const idleHooked = AgentRow.array().parse([
...query(
sql,
@@ -1302,9 +1475,14 @@ export function reconcileReviewQueue(
}
// Orphan cleanup: open MR beads without pr_url that aren't convoy
- // review-and-merge beads. The polecat should have created the PR
- // (merge_strategy=pr) but didn't — fail the MR and reopen the
- // source bead so another polecat can retry.
+ // review-and-merge beads or system-created landing MR beads.
+ // The polecat should have created the PR (merge_strategy=pr) but
+ // didn't — fail the MR and reopen the source bead so another
+ // polecat can retry.
+ // Landing MR beads (created_by='system') are excluded because they
+ // are created by reconcileConvoys for review-then-land convoys and
+ // intentionally have no pr_url at creation — the refinery creates
+ // the PR when it picks up the landing MR.
const orphanedMrs = z
.object({ bead_id: z.string(), source_bead_id: z.string().nullable() })
.array()
@@ -1324,6 +1502,7 @@ export function reconcileReviewQueue(
AND b.${beads.columns.status} = 'open'
AND b.${beads.columns.rig_id} = ?
AND rm.${review_metadata.columns.pr_url} IS NULL
+ AND b.${beads.columns.created_by} != 'system'
AND NOT EXISTS (
SELECT 1
FROM ${beads} parent
@@ -1386,21 +1565,25 @@ export function reconcileReviewQueue(
]);
for (const { rig_id } of rigsWithOpenMrs) {
- // When code_review=false, only dispatch the refinery for convoy
- // review-and-merge MR beads (refinery does combined review+merge).
+ // When code_review=false, only dispatch the refinery for:
+ // 1. Convoy review-and-merge MR beads (refinery does combined review+merge)
+ // 2. System-created landing MR beads (review-then-land convoy finalization)
// MR beads WITH a pr_url are handled by the fast-track → poll_pr.
// MR beads WITHOUT a pr_url when merge_strategy=pr are orphaned
- // (polecat should have created the PR) — Rule 2 handles them.
+ // (polecat should have created the PR) — orphan cleanup handles them.
const refineryNeededFilter = rigCodeReview(rig_id)
? ''
: /* sql */ `
- AND EXISTS (
- SELECT 1
- FROM ${beads} outer_parent
- JOIN ${convoy_metadata} cm
- ON cm.${convoy_metadata.columns.bead_id} = outer_parent.${beads.columns.bead_id}
- WHERE outer_parent.${beads.columns.bead_id} = ${beads.parent_bead_id}
- AND cm.${convoy_metadata.columns.merge_mode} = 'review-and-merge'
+ AND (
+ EXISTS (
+ SELECT 1
+ FROM ${beads} outer_parent
+ JOIN ${convoy_metadata} cm
+ ON cm.${convoy_metadata.columns.bead_id} = outer_parent.${beads.columns.bead_id}
+ WHERE outer_parent.${beads.columns.bead_id} = ${beads.parent_bead_id}
+ AND cm.${convoy_metadata.columns.merge_mode} = 'review-and-merge'
+ )
+ OR ${beads.created_by} = 'system'
)`;
// Check if rig already has an in_progress MR that needs the refinery.
@@ -1723,14 +1906,19 @@ export function reconcileConvoys(sql: SqlStorage): Action[] {
if (progressRows.length === 0) continue;
const { closed_count, total_count } = progressRows[0];
- // Update progress if stale
- if (closed_count !== convoy.closed_beads) {
- actions.push({
- type: 'update_convoy_progress',
- convoy_id: convoy.bead_id,
- closed_beads: closed_count,
- });
+ // Parse convoy metadata for landing MR tracking fields (#2260)
+ let parsedMeta: Record<string, unknown> = {};
+ try {
+ parsedMeta = JSON.parse(convoy.metadata) as Record<string, unknown>;
+ } catch {
+ /* ignore */
}
+ const landingMrAttempts =
+ typeof parsedMeta.landing_mr_attempts === 'number' ? parsedMeta.landing_mr_attempts : 0;
+ const lastLandingMrAttemptAt =
+ typeof parsedMeta.last_landing_mr_attempt_at === 'string'
+ ? parsedMeta.last_landing_mr_attempt_at
+ : null;
// Check for in-flight MR beads (open or in_progress) for tracked issue beads
const inFlightMrCount = z
@@ -1759,68 +1947,130 @@ export function reconcileConvoys(sql: SqlStorage): Action[] {
const hasInFlightReviews = (inFlightMrCount[0]?.cnt ?? 0) > 0;
// Check if all beads done
- if (closed_count >= total_count && total_count > 0 && !hasInFlightReviews) {
- let parsedMeta: Record<string, unknown> = {};
- try {
- parsedMeta = JSON.parse(convoy.metadata) as Record<string, unknown>;
- } catch {
- /* ignore */
+ const allBeadsDone = closed_count >= total_count && total_count > 0 && !hasInFlightReviews;
+
+ // Update progress if stale (skip if we're failing/closing the convoy this tick)
+ if (closed_count !== convoy.closed_beads) {
+ actions.push({
+ type: 'update_convoy_progress',
+ convoy_id: convoy.bead_id,
+ closed_beads: closed_count,
+ });
+ }
+
+ if (!allBeadsDone) continue;
+
+ if (convoy.merge_mode === 'review-then-land' && convoy.feature_branch) {
+ if (!parsedMeta.ready_to_land) {
+ actions.push({
+ type: 'set_convoy_ready_to_land',
+ convoy_id: convoy.bead_id,
+ });
}
- if (convoy.merge_mode === 'review-then-land' && convoy.feature_branch) {
- if (!parsedMeta.ready_to_land) {
+ if (parsedMeta.ready_to_land) {
+ // Check if a landing MR already exists (any status)
+ const landingMrs = z
+ .object({ status: z.string() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT mr.${beads.columns.status}
+ FROM ${bead_dependencies} bd
+ INNER JOIN ${beads} mr ON mr.${beads.columns.bead_id} = bd.${bead_dependencies.columns.bead_id}
+ WHERE bd.${bead_dependencies.columns.depends_on_bead_id} = ?
+ AND bd.${bead_dependencies.columns.dependency_type} = 'tracks'
+ AND mr.${beads.columns.type} = 'merge_request'
+ `,
+ [convoy.bead_id]
+ ),
+ ]);
+
+ // If a landing MR was already merged (closed), close the convoy
+ const hasMergedLanding = landingMrs.some(mr => mr.status === 'closed');
+ if (hasMergedLanding) {
+ actions.push({
+ type: 'close_convoy',
+ convoy_id: convoy.bead_id,
+ });
+ continue;
+ }
+
+ // Fix 1 (#2260): If a landing MR is active (open or in_progress), wait — don't create another
+ const hasActiveLanding = landingMrs.some(
+ mr => mr.status === 'open' || mr.status === 'in_progress'
+ );
+ if (hasActiveLanding) continue;
+
+ // Fix 2 (#2260): If max landing MR attempts exceeded and no landing MR is
+ // active or merged, fail the convoy. Checked after landing MR status lookup
+ // so the final allowed attempt can still succeed.
+ if (landingMrAttempts >= MAX_LANDING_MR_ATTEMPTS) {
actions.push({
- type: 'set_convoy_ready_to_land',
+ type: 'fail_convoy',
convoy_id: convoy.bead_id,
+ reason: `Landing MR creation failed after ${MAX_LANDING_MR_ATTEMPTS} attempts`,
});
+ continue;
+ }
+
+ // Fix 2 (#2260): Apply exponential cooldown between landing MR attempts
+ if (landingMrAttempts > 0 && lastLandingMrAttemptAt) {
+ const elapsed = Date.now() - new Date(lastLandingMrAttemptAt).getTime();
+ const cooldownMs = Math.min(
+ Math.pow(2, landingMrAttempts) * LANDING_MR_COOLDOWN_BASE_MS,
+ LANDING_MR_COOLDOWN_MAX_MS
+ );
+ if (elapsed < cooldownMs) continue;
}
- if (parsedMeta.ready_to_land) {
- // Check if a landing MR already exists (any status)
- const landingMrs = z
- .object({ status: z.string() })
+ // Fix 3 (#2260): Check that tracked beads have at least one MR with a PR URL.
+ // Intentionally skipped for review-then-land (direct merges into the feature
+ // branch persist no pr_url). NOTE(review): this branch only runs when
+ // merge_mode === 'review-then-land', so needsPrUrl below is always false —
+ const needsPrUrl = convoy.merge_mode !== 'review-then-land';
+ if (needsPrUrl) {
+ const convoyBeadsWithPr = z
+ .object({ cnt: z.number() })
.array()
.parse([
...query(
sql,
/* sql */ `
- SELECT mr.${beads.columns.status}
- FROM ${bead_dependencies} bd
- INNER JOIN ${beads} mr ON mr.${beads.columns.bead_id} = bd.${bead_dependencies.columns.bead_id}
- WHERE bd.${bead_dependencies.columns.depends_on_bead_id} = ?
- AND bd.${bead_dependencies.columns.dependency_type} = 'tracks'
- AND mr.${beads.columns.type} = 'merge_request'
- `,
+ SELECT count(*) as cnt
+ FROM ${bead_dependencies} track_dep
+ INNER JOIN ${bead_dependencies} mr_dep
+ ON mr_dep.${bead_dependencies.columns.depends_on_bead_id} = track_dep.${bead_dependencies.columns.bead_id}
+ INNER JOIN ${review_metadata} rm
+ ON rm.${review_metadata.columns.bead_id} = mr_dep.${bead_dependencies.columns.bead_id}
+ WHERE track_dep.${bead_dependencies.columns.depends_on_bead_id} = ?
+ AND track_dep.${bead_dependencies.columns.dependency_type} = 'tracks'
+ AND mr_dep.${bead_dependencies.columns.dependency_type} = 'tracks'
+ AND rm.${review_metadata.columns.pr_url} IS NOT NULL
+ `,
[convoy.bead_id]
),
]);
- // If a landing MR was already merged (closed), close the convoy
- const hasMergedLanding = landingMrs.some(mr => mr.status === 'closed');
- if (hasMergedLanding) {
- actions.push({
- type: 'close_convoy',
- convoy_id: convoy.bead_id,
- });
+ if ((convoyBeadsWithPr[0]?.cnt ?? 0) === 0) {
+ console.warn(
+ `${LOG} convoy ${convoy.bead_id} has no beads with pr_url — skipping create_landing_mr`
+ );
continue;
}
+ }
- // If a landing MR is active (open or in_progress), wait for it
- const hasActiveLanding = landingMrs.some(
- mr => mr.status === 'open' || mr.status === 'in_progress'
- );
- if (hasActiveLanding) continue;
-
- // No landing MR exists yet — create one
- {
- // Need rig_id from one of the tracked beads
- const rigRows = z
- .object({ rig_id: z.string() })
- .array()
- .parse([
- ...query(
- sql,
- /* sql */ `
+ // No landing MR exists yet and cooldown has passed — create one
+ {
+ const rigRows = z
+ .object({ rig_id: z.string() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
SELECT DISTINCT tracked.${beads.columns.rig_id} as rig_id
FROM ${bead_dependencies} bd
INNER JOIN ${beads} tracked ON tracked.${beads.columns.bead_id} = bd.${bead_dependencies.columns.bead_id}
@@ -1829,29 +2079,28 @@ export function reconcileConvoys(sql: SqlStorage): Action[] {
AND tracked.${beads.columns.rig_id} IS NOT NULL
LIMIT 1
`,
- [convoy.bead_id]
- ),
- ]);
-
- if (rigRows.length > 0) {
- const rig = getRig(sql, rigRows[0].rig_id);
- actions.push({
- type: 'create_landing_mr',
- convoy_id: convoy.bead_id,
- rig_id: rigRows[0].rig_id,
- feature_branch: convoy.feature_branch,
- target_branch: rig?.default_branch ?? 'main',
- });
- }
+ [convoy.bead_id]
+ ),
+ ]);
+
+ if (rigRows.length > 0) {
+ const rig = getRig(sql, rigRows[0].rig_id);
+ actions.push({
+ type: 'create_landing_mr',
+ convoy_id: convoy.bead_id,
+ rig_id: rigRows[0].rig_id,
+ feature_branch: convoy.feature_branch,
+ target_branch: rig?.default_branch ?? 'main',
+ });
}
}
- } else {
- // review-and-merge or no feature branch — auto-close
- actions.push({
- type: 'close_convoy',
- convoy_id: convoy.bead_id,
- });
}
+ } else {
+ // review-and-merge or no feature branch — auto-close
+ actions.push({
+ type: 'close_convoy',
+ convoy_id: convoy.bead_id,
+ });
}
}
@@ -2094,24 +2343,62 @@ function hasRecentNudge(sql: SqlStorage, agentId: string, tier: string): boolean
return rows.length > 0;
}
+/** Check if an MR bead has a non-terminal conflict bead (gt:pr-conflict) blocking it. */
+function hasExistingPrConflictBead(sql: SqlStorage, mrBeadId: string): boolean {
+ return getExistingPrConflictBeadId(sql, mrBeadId) !== null;
+}
+
+/** Return the bead_id of a non-terminal conflict bead (gt:pr-conflict) blocking the MR, or null. */
+function getExistingPrConflictBeadId(sql: SqlStorage, mrBeadId: string): string | null {
+ const rows = z
+ .object({ bead_id: z.string() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT fb.${beads.columns.bead_id}
+ FROM ${bead_dependencies} bd
+ INNER JOIN ${beads} fb ON fb.${beads.columns.bead_id} = bd.${bead_dependencies.columns.depends_on_bead_id}
+ WHERE bd.${bead_dependencies.columns.bead_id} = ?
+ AND bd.${bead_dependencies.columns.dependency_type} = 'blocks'
+ AND fb.${beads.columns.labels} LIKE '%gt:pr-conflict%'
+ AND fb.${beads.columns.status} NOT IN ('closed', 'failed')
+ LIMIT 1
+ `,
+ [mrBeadId]
+ ),
+ ]);
+ return rows.length > 0 ? rows[0].bead_id : null;
+}
+
/** Check if an MR bead has a non-terminal feedback bead (gt:pr-feedback) blocking it. */
function hasExistingPrFeedbackBead(sql: SqlStorage, mrBeadId: string): boolean {
- const rows = [
- ...query(
- sql,
- /* sql */ `
- SELECT 1 FROM ${bead_dependencies} bd
- INNER JOIN ${beads} fb ON fb.${beads.columns.bead_id} = bd.${bead_dependencies.columns.depends_on_bead_id}
- WHERE bd.${bead_dependencies.columns.bead_id} = ?
- AND bd.${bead_dependencies.columns.dependency_type} = 'blocks'
- AND fb.${beads.columns.labels} LIKE '%gt:pr-feedback%'
- AND fb.${beads.columns.status} NOT IN ('closed', 'failed')
- LIMIT 1
- `,
- [mrBeadId]
- ),
- ];
- return rows.length > 0;
+ return getExistingPrFeedbackBeadId(sql, mrBeadId) !== null;
+}
+
+/** Return the bead_id of a non-terminal feedback bead (gt:pr-feedback) blocking the MR, or null. */
+function getExistingPrFeedbackBeadId(sql: SqlStorage, mrBeadId: string): string | null {
+ const rows = z
+ .object({ bead_id: z.string() })
+ .array()
+ .parse([
+ ...query(
+ sql,
+ /* sql */ `
+ SELECT fb.${beads.columns.bead_id}
+ FROM ${bead_dependencies} bd
+ INNER JOIN ${beads} fb ON fb.${beads.columns.bead_id} = bd.${bead_dependencies.columns.depends_on_bead_id}
+ WHERE bd.${bead_dependencies.columns.bead_id} = ?
+ AND bd.${bead_dependencies.columns.dependency_type} = 'blocks'
+ AND fb.${beads.columns.labels} LIKE '%gt:pr-feedback%'
+ AND fb.${beads.columns.status} NOT IN ('closed', 'failed')
+ LIMIT 1
+ `,
+ [mrBeadId]
+ ),
+ ]);
+ return rows.length > 0 ? rows[0].bead_id : null;
}
/** Build a human-readable title for the feedback bead. */
@@ -2192,6 +2479,49 @@ function buildFeedbackPrompt(
return lines.join('\n');
}
+/** Build the polecat prompt body for resolving merge conflicts on a PR branch. */
+function buildConflictResolutionPrompt(
+ prUrl: string,
+ branch: string,
+ targetBranch: string
+): string {
+ const lines: string[] = [];
+ lines.push(`You are resolving merge conflicts on branch \`${branch}\`.`);
+ lines.push(`The PR is: ${prUrl}`);
+ lines.push(`The target branch is: \`${targetBranch}\``);
+ lines.push('');
+ lines.push('## Steps');
+ lines.push('');
+ lines.push('1. Fetch the latest state of the remote:');
+ lines.push(' ```');
+ lines.push(' git fetch origin');
+ lines.push(' ```');
+ lines.push('');
+ lines.push(`2. Rebase your branch onto the target branch to incorporate its latest changes:`);
+ lines.push(' ```');
+ lines.push(` git rebase origin/${targetBranch}`);
+ lines.push(' ```');
+ lines.push('');
+ lines.push('3. If there are conflicts during rebase, resolve them:');
+ lines.push(
+ ' - Edit the conflicting files to resolve the conflict markers (`<<<<<<<`, `=======`, `>>>>>>>`)'
+ );
+ lines.push(' - Stage the resolved files: `git add <files>`');
+ lines.push(' - Continue the rebase: `git rebase --continue`');
+ lines.push(' - Repeat until the rebase completes');
+ lines.push('');
+ lines.push('4. Push the rebased branch:');
+ lines.push(' ```');
+ lines.push(` git push --force-with-lease origin ${branch}`);
+ lines.push(' ```');
+ lines.push('');
+ lines.push('5. Call `gt_done` once the push succeeds, passing both required arguments:');
+ lines.push(` - \`pr_url\`: \`${prUrl}\``);
+ lines.push(` - \`branch\`: \`${branch}\``);
+
+ return lines.join('\n');
+}
+
// ════════════════════════════════════════════════════════════════════
// Invariant checker — runs after action application to detect
// violations of the system invariants from spec §6.
@@ -2236,7 +2566,9 @@ export function checkInvariants(sql: SqlStorage): Violation[] {
}
// Invariant 5: Convoy beads should not be in unexpected states.
- // Valid transient states: open, in_progress, in_review, closed.
+ // Valid states: open, in_progress, in_review, closed, failed.
+ // 'failed' is a terminal state set by FailConvoy when landing MR
+ // creation is exhausted.
const badStateConvoys = z
.object({ bead_id: z.string(), status: z.string() })
.array()
@@ -2247,7 +2579,7 @@ export function checkInvariants(sql: SqlStorage): Violation[] {
SELECT ${beads.bead_id}, ${beads.status}
FROM ${beads}
WHERE ${beads.type} = 'convoy'
- AND ${beads.status} NOT IN ('open', 'in_progress', 'in_review', 'closed')
+ AND ${beads.status} NOT IN ('open', 'in_progress', 'in_review', 'closed', 'failed')
`,
[]
),
diff --git a/services/gastown/src/dos/town/review-queue.ts b/services/gastown/src/dos/town/review-queue.ts
index e25819107b..80fd1b6b1a 100644
--- a/services/gastown/src/dos/town/review-queue.ts
+++ b/services/gastown/src/dos/town/review-queue.ts
@@ -208,53 +208,6 @@ export function submitToReviewQueue(sql: SqlStorage, input: ReviewQueueInput): v
});
}
-export function popReviewQueue(sql: SqlStorage): ReviewQueueEntry | null {
- // Pop the oldest open MR bead, but skip any whose source bead already
- // has another MR in_progress (i.e. a refinery is already reviewing it).
- // This prevents popping stale MR beads and triggering failReviewWithRework
- // while an active review is in flight for the same source.
- //
- // The source bead is linked via bead_dependencies (dependency_type='tracks'):
- // bead_dependencies.bead_id = MR bead
- // bead_dependencies.depends_on_bead_id = source bead
- const rows = [
- ...query(
- sql,
- /* sql */ `
- ${REVIEW_JOIN}
- WHERE ${beads.status} = 'open'
- AND NOT EXISTS (
- SELECT 1 FROM ${beads} AS active_mr
- WHERE active_mr.${beads.columns.type} = 'merge_request'
- AND active_mr.${beads.columns.status} = 'in_progress'
- AND active_mr.${beads.columns.rig_id} = ${beads.rig_id}
- )
- ORDER BY ${beads.created_at} ASC
- LIMIT 1
- `,
- []
- ),
- ];
-
- if (rows.length === 0) return null;
- const parsed = MergeRequestBeadRecord.parse(rows[0]);
- const entry = toReviewQueueEntry(parsed);
-
- // Mark as running (in_progress)
- query(
- sql,
- /* sql */ `
- UPDATE ${beads}
- SET ${beads.columns.status} = 'in_progress',
- ${beads.columns.updated_at} = ?
- WHERE ${beads.bead_id} = ?
- `,
- [now(), entry.id]
- );
-
- return { ...entry, status: 'running', processed_at: now() };
-}
-
export function completeReview(
sql: SqlStorage,
entryId: string,
@@ -369,8 +322,8 @@ export function completeReviewWithResult(
conflict: true,
},
});
- // Return source bead to open so the normal scheduling path handles
- // rework. Clear assignee so feedStrandedConvoys can match.
+ // Return source bead to open so the reconciler's scheduling path handles
+ // rework. Clear assignee so the reconciler can match it for dispatch.
const conflictSourceBead = getBead(sql, entry.bead_id);
if (
conflictSourceBead &&
@@ -390,11 +343,10 @@ export function completeReviewWithResult(
}
} else if (input.status === 'failed') {
// Review failed (rework requested): return source bead to open so
- // the normal scheduling path (feedStrandedConvoys → hookBead →
- // schedulePendingWork → dispatch) handles rework. Clear the stale
- // assignee so feedStrandedConvoys can match (requires assignee IS NULL).
- // This avoids the fire-and-forget rework dispatch race in TownDO
- // where the dispatch fails and rehookOrphanedBeads churn.
+ // the reconciler's scheduling path handles rework. Clear the stale
+ // assignee so the reconciler can match it for dispatch (requires
+ // assignee IS NULL). This avoids a fire-and-forget rework dispatch
+ // race where the dispatch fails and the bead churns.
const sourceBead = getBead(sql, entry.bead_id);
if (sourceBead && sourceBead.status !== 'closed' && sourceBead.status !== 'failed') {
updateBeadStatus(sql, entry.bead_id, 'open', entry.agent_id);
@@ -498,9 +450,8 @@ export function agentDone(sql: SqlStorage, agentId: string, input: AgentDoneInpu
const agent = getAgent(sql, agentId);
if (!agent) throw new Error(`Agent ${agentId} not found`);
if (!agent.current_hook_bead_id) {
- // The agent was unhooked by a recovery path (witnessPatrol,
- // rehookOrphanedBeads) between when the agent finished work and
- // when it called gt_done.
+ // The agent was unhooked by a recovery path between when the agent
+ // finished work and when it called gt_done.
//
// For refineries, this is critical: the refinery successfully merged
// but the hook was cleared by zombie detection. We MUST still complete
@@ -581,9 +532,12 @@ export function agentDone(sql: SqlStorage, agentId: string, input: AgentDoneInpu
// PR-fixup beads skip the review queue. The polecat pushed fixup commits
// to an existing PR branch — no separate review is needed.
- if (hookedBead?.labels.includes('gt:pr-fixup')) {
+ // PR-conflict beads also skip the review queue: the polecat rebased and
+ // force-pushed the branch to resolve conflicts — closing the bead unblocks
+ // the parent MR bead so poll_pr can re-check mergeable_state.
+ if (hookedBead?.labels.includes('gt:pr-fixup') || hookedBead?.labels.includes('gt:pr-conflict')) {
console.log(
- `[review-queue] agentDone: pr-fixup bead ${agent.current_hook_bead_id} — closing directly (skip review)`
+ `[review-queue] agentDone: ${hookedBead.labels.includes('gt:pr-conflict') ? 'pr-conflict' : 'pr-fixup'} bead ${agent.current_hook_bead_id} — closing directly (skip review)`
);
closeBead(sql, agent.current_hook_bead_id, agentId);
unhookBead(sql, agentId);
@@ -648,9 +602,9 @@ export function agentDone(sql: SqlStorage, agentId: string, input: AgentDoneInpu
unhookBead(sql, agentId);
// Set refinery to idle immediately — the review is done and the
- // refinery is available for new work. Without this, processReviewQueue
- // sees the refinery as 'working' and won't pop the next MR bead until
- // agentCompleted fires (when the container process eventually exits).
+ // refinery is available for new work. Without this, the reconciler
+ // sees the refinery as 'working' and won't dispatch the next MR bead
+ // until agentCompleted fires (when the container process eventually exits).
updateAgentStatus(sql, agentId, 'idle');
return;
}
@@ -659,7 +613,7 @@ export function agentDone(sql: SqlStorage, agentId: string, input: AgentDoneInpu
if (!agent.rig_id) {
console.warn(
- `[review-queue] agentDone: agent ${agentId} has null rig_id — review entry may fail in processReviewQueue`
+ `[review-queue] agentDone: agent ${agentId} has null rig_id — review entry may fail in submitToReviewQueue`
);
}
@@ -718,13 +672,13 @@ export function agentCompleted(
// NEVER fail or unhook a refinery from agentCompleted.
// agentCompleted races with gt_done: the process exits, the
// container sends /completed, but gt_done's HTTP request may
- // still be in flight. If we unhook here, recoverStuckReviews
- // can fire between agentCompleted and gt_done, resetting the
- // MR bead that's about to be closed by gt_done.
+ // still be in flight. If we unhook here, a recovery path can
+ // fire between agentCompleted and gt_done, resetting the MR bead
+ // that's about to be closed by gt_done.
//
// Leave the hook intact. gt_done will close + unhook if the
- // merge succeeded. recoverStuckReviews (which checks for
- // status='working') handles the case where gt_done never arrives.
+ // merge succeeded. The reconciler (which checks for status='working')
+ // handles the case where gt_done never arrives.
//
// No-op for the bead — just fall through to mark agent idle.
} else {
diff --git a/services/gastown/src/dos/town/scheduling.ts b/services/gastown/src/dos/town/scheduling.ts
index c4063e92cc..7f0947938b 100644
--- a/services/gastown/src/dos/town/scheduling.ts
+++ b/services/gastown/src/dos/town/scheduling.ts
@@ -126,27 +126,31 @@ export async function dispatchAgent(
const rigRecord = rigs.getRig(ctx.sql, rigId);
- const started = await dispatch.startAgentInContainer(ctx.env, ctx.storage, {
- townId: ctx.townId,
- rigId,
- userId: rigConfig.userId,
- agentId: agent.id,
- agentName: agent.name,
- role: agent.role,
- identity: agent.identity,
- beadId: bead.bead_id,
- beadTitle: bead.title,
- beadBody: bead.body ?? '',
- checkpoint: agent.checkpoint,
- gitUrl: rigConfig.gitUrl,
- defaultBranch: rigConfig.defaultBranch,
- kilocodeToken,
- townConfig,
- rigOverride: rigRecord?.config ?? null,
- platformIntegrationId: rigConfig.platformIntegrationId,
- convoyFeatureBranch: convoyFeatureBranch ?? undefined,
- systemPromptOverride: options?.systemPromptOverride,
- });
+ const { started, containerFetchMs } = await dispatch.startAgentInContainer(
+ ctx.env,
+ ctx.storage,
+ {
+ townId: ctx.townId,
+ rigId,
+ userId: rigConfig.userId,
+ agentId: agent.id,
+ agentName: agent.name,
+ role: agent.role,
+ identity: agent.identity,
+ beadId: bead.bead_id,
+ beadTitle: bead.title,
+ beadBody: bead.body ?? '',
+ checkpoint: agent.checkpoint,
+ gitUrl: rigConfig.gitUrl,
+ defaultBranch: rigConfig.defaultBranch,
+ kilocodeToken,
+ townConfig,
+ rigOverride: rigRecord?.config ?? null,
+ platformIntegrationId: rigConfig.platformIntegrationId,
+ convoyFeatureBranch: convoyFeatureBranch ?? undefined,
+ systemPromptOverride: options?.systemPromptOverride,
+ }
+ );
if (started) {
// Reset dispatch_attempts on successful start — but NOT for refineries.
@@ -172,6 +176,7 @@ export async function dispatchAgent(
agentId: agent.id,
beadId: bead.bead_id,
role: agent.role,
+ durationMs: containerFetchMs,
});
} else {
// Container start returned false — but the container may have
diff --git a/services/gastown/src/dos/town/town-scm.ts b/services/gastown/src/dos/town/town-scm.ts
index 398e111622..8e2bddf377 100644
--- a/services/gastown/src/dos/town/town-scm.ts
+++ b/services/gastown/src/dos/town/town-scm.ts
@@ -45,14 +45,20 @@ export async function resolveGitHubToken(ctx: SCMContext): Promise {
+): Promise<{ status: 'open' | 'closed' | 'merged'; mergeable_state?: string } | null> {
const townConfig = await ctx.getTownConfig();
// GitHub PR URL format: https://github.com/{owner}/{repo}/pull/{number}
@@ -87,9 +93,9 @@ export async function checkPRStatus(
const data = GitHubPRStatusSchema.safeParse(json);
if (!data.success) return null;
- if (data.data.merged) return 'merged';
- if (data.data.state === 'closed') return 'closed';
- return 'open';
+ if (data.data.merged) return { status: 'merged' };
+ if (data.data.state === 'closed') return { status: 'closed' };
+ return { status: 'open', mergeable_state: data.data.mergeable_state };
}
// GitLab MR URL format: https://{host}/{path}/-/merge_requests/{iid}
@@ -133,9 +139,9 @@ export async function checkPRStatus(
const data = GitLabMRStatusSchema.safeParse(glJson);
if (!data.success) return null;
- if (data.data.state === 'merged') return 'merged';
- if (data.data.state === 'closed') return 'closed';
- return 'open';
+ if (data.data.state === 'merged') return { status: 'merged' };
+ if (data.data.state === 'closed') return { status: 'closed' };
+ return { status: 'open' };
}
console.warn(`${TOWN_LOG} checkPRStatus: unrecognized PR URL format: ${prUrl}`);
diff --git a/services/gastown/src/gastown.worker.ts b/services/gastown/src/gastown.worker.ts
index 2e918ea215..14546cf07f 100644
--- a/services/gastown/src/gastown.worker.ts
+++ b/services/gastown/src/gastown.worker.ts
@@ -113,10 +113,14 @@ import {
handleMayorConvoyClose,
handleMayorConvoyUpdate,
handleMayorBeadDelete,
+ handleMayorBulkDeleteBeads,
+ handleMayorDeleteBeadsByStatus,
handleMayorEscalationAcknowledge,
handleMayorConvoyStart,
handleMayorUiAction,
handleMayorGetPendingNudges,
+ handleMayorConvoyAddBead,
+ handleMayorConvoyRemoveBead,
} from './handlers/mayor-tools.handler';
import { mayorAuthMiddleware } from './middleware/mayor-auth.middleware';
import { townAuthMiddleware } from './middleware/town-auth.middleware';
@@ -631,6 +635,12 @@ app.post('/api/towns/:townId/rigs/:rigId/agents/:agentId/db-snapshot', async c =
return c.json({ success: true });
});
+app.delete('/api/towns/:townId/rigs/:rigId/agents/:agentId/db-snapshot', async c => {
+ const { agentId } = c.req.param();
+ await c.env.AGENT_DB_SNAPSHOTS_KV.delete(agentId);
+ return c.json({ success: true });
+});
+
// ── Kilo User Auth ──────────────────────────────────────────────────────
// Validate Kilo user JWT (signed with NEXTAUTH_SECRET) for dashboard/user
// routes. Container→worker routes use the agent JWT middleware instead
@@ -978,6 +988,16 @@ app.delete('/api/mayor/:townId/tools/rigs/:rigId/beads/:beadId', c =>
handleMayorBeadDelete(c, c.req.param())
)
);
+app.post('/api/mayor/:townId/tools/rigs/:rigId/beads/bulk-delete', c =>
+ instrumented(c, 'POST /api/mayor/:townId/tools/rigs/:rigId/beads/bulk-delete', () =>
+ handleMayorBulkDeleteBeads(c, c.req.param())
+ )
+);
+app.post('/api/mayor/:townId/tools/rigs/:rigId/beads/delete-by-status', c =>
+ instrumented(c, 'POST /api/mayor/:townId/tools/rigs/:rigId/beads/delete-by-status', () =>
+ handleMayorDeleteBeadsByStatus(c, c.req.param())
+ )
+);
app.post('/api/mayor/:townId/tools/rigs/:rigId/agents/:agentId/reset', c =>
instrumented(c, 'POST /api/mayor/:townId/tools/rigs/:rigId/agents/:agentId/reset', () =>
handleMayorAgentReset(c, c.req.param())
@@ -1001,6 +1021,16 @@ app.post('/api/mayor/:townId/tools/escalations/:escalationId/acknowledge', c =>
app.post('/api/mayor/:townId/tools/convoys/:convoyId/start', c =>
handleMayorConvoyStart(c, c.req.param())
);
+app.post('/api/mayor/:townId/tools/convoys/:convoyId/add-bead', c =>
+ instrumented(c, 'POST /api/mayor/:townId/tools/convoys/:convoyId/add-bead', () =>
+ handleMayorConvoyAddBead(c, c.req.param())
+ )
+);
+app.post('/api/mayor/:townId/tools/convoys/:convoyId/remove-bead', c =>
+ instrumented(c, 'POST /api/mayor/:townId/tools/convoys/:convoyId/remove-bead', () =>
+ handleMayorConvoyRemoveBead(c, c.req.param())
+ )
+);
// ── tRPC ────────────────────────────────────────────────────────────────
// Serve the gastown tRPC router directly. The frontend tRPC client
// connects here instead of going through the Next.js proxy layer.
diff --git a/services/gastown/src/handlers/mayor-tools.handler.ts b/services/gastown/src/handlers/mayor-tools.handler.ts
index fcfe207ce3..00a4996a8f 100644
--- a/services/gastown/src/handlers/mayor-tools.handler.ts
+++ b/services/gastown/src/handlers/mayor-tools.handler.ts
@@ -392,6 +392,7 @@ const BeadUpdateBody = z
metadata: z.record(z.string(), z.unknown()).optional(),
rig_id: z.string().min(1).nullable().optional(),
parent_bead_id: z.string().min(1).nullable().optional(),
+ depends_on: z.array(z.string().uuid()).optional(),
})
.refine(
data =>
@@ -402,7 +403,8 @@ const BeadUpdateBody = z
data.status !== undefined ||
data.metadata !== undefined ||
data.rig_id !== undefined ||
- data.parent_bead_id !== undefined,
+ data.parent_bead_id !== undefined ||
+ data.depends_on !== undefined,
{ message: 'At least one field must be provided' }
);
@@ -635,6 +637,69 @@ export async function handleMayorBeadDelete(
return c.json(resSuccess({ deleted: true }));
}
+const MayorBulkDeleteBeadsBody = z.object({
+ bead_ids: z.array(z.string().uuid()).min(1).max(5000),
+});
+
+export async function handleMayorBulkDeleteBeads(
+ c: Context,
+ params: { townId: string; rigId: string }
+) {
+ const rigOwned = await verifyRigBelongsToTown(c, params.townId, params.rigId);
+ if (!rigOwned) {
+ return c.json(resError('Rig not found in this town'), 403);
+ }
+
+ const parsed = MayorBulkDeleteBeadsBody.safeParse(await parseJsonBody(c));
+ if (!parsed.success) {
+ return c.json(resError(`Invalid request body: ${parsed.error.message}`), 400);
+ }
+
+ const { bead_ids } = parsed.data;
+
+ console.log(
+ `${HANDLER_LOG} handleMayorBulkDeleteBeads: townId=${params.townId} rigId=${params.rigId} count=${bead_ids.length}`
+ );
+
+ const town = getTownDOStub(c.env, params.townId);
+ const count = await town.deleteBeads(bead_ids);
+
+ return c.json(resSuccess({ deleted: count }));
+}
+
+const MayorDeleteBeadsByStatusBody = z.object({
+ status: z.enum(['open', 'in_progress', 'in_review', 'closed', 'failed']),
+ type: z
+ .enum(['issue', 'message', 'escalation', 'merge_request', 'convoy', 'molecule', 'agent'])
+ .optional(),
+});
+
+export async function handleMayorDeleteBeadsByStatus(
+ c: Context,
+ params: { townId: string; rigId: string }
+) {
+ const rigOwned = await verifyRigBelongsToTown(c, params.townId, params.rigId);
+ if (!rigOwned) {
+ return c.json(resError('Rig not found in this town'), 403);
+ }
+
+ const parsed = MayorDeleteBeadsByStatusBody.safeParse(await parseJsonBody(c));
+ if (!parsed.success) {
+ return c.json(resError(`Invalid request body: ${parsed.error.message}`), 400);
+ }
+
+ const { status, type } = parsed.data;
+
+ console.log(
+ `${HANDLER_LOG} handleMayorDeleteBeadsByStatus: townId=${params.townId} rigId=${params.rigId} status=${status}${type ? ` type=${type}` : ''}`
+ );
+
+ const town = getTownDOStub(c.env, params.townId);
+ const count = await town.deleteBeadsByStatus(status, type, params.rigId);
+
+ return c.json(resSuccess({ deleted: count }));
+}
+
/**
* POST /api/mayor/:townId/tools/escalations/:escalationId/acknowledge
* Acknowledge an escalation, marking it as reviewed.
@@ -685,6 +750,69 @@ export async function handleMayorConvoyStart(
return c.json(resSuccess(result));
}
+const ConvoyAddBeadBody = z.object({
+ bead_id: z.string().uuid(),
+ depends_on: z.array(z.string().uuid()).optional(),
+});
+
+/**
+ * POST /api/mayor/:townId/tools/convoys/:convoyId/add-bead
+ * Add an existing bead to a convoy's tracking.
+ */
+export async function handleMayorConvoyAddBead(
+ c: Context,
+ params: { townId: string; convoyId: string }
+) {
+ const parsed = ConvoyAddBeadBody.safeParse(await parseJsonBody(c));
+ if (!parsed.success) {
+ return c.json(
+ { success: false, error: 'Invalid request body', issues: parsed.error.issues },
+ 400
+ );
+ }
+
+ console.log(
+ `${HANDLER_LOG} handleMayorConvoyAddBead: townId=${params.townId} convoyId=${params.convoyId} beadId=${parsed.data.bead_id}`
+ );
+
+ const town = getTownDOStub(c.env, params.townId);
+ const result = await town.convoyAddBead(
+ params.convoyId,
+ parsed.data.bead_id,
+ parsed.data.depends_on
+ );
+ return c.json(resSuccess(result));
+}
+
+const ConvoyRemoveBeadBody = z.object({
+ bead_id: z.string().uuid(),
+});
+
+/**
+ * POST /api/mayor/:townId/tools/convoys/:convoyId/remove-bead
+ * Remove a bead from a convoy's tracking.
+ */
+export async function handleMayorConvoyRemoveBead(
+ c: Context,
+ params: { townId: string; convoyId: string }
+) {
+ const parsed = ConvoyRemoveBeadBody.safeParse(await parseJsonBody(c));
+ if (!parsed.success) {
+ return c.json(
+ { success: false, error: 'Invalid request body', issues: parsed.error.issues },
+ 400
+ );
+ }
+
+ console.log(
+ `${HANDLER_LOG} handleMayorConvoyRemoveBead: townId=${params.townId} convoyId=${params.convoyId} beadId=${parsed.data.bead_id}`
+ );
+
+ const town = getTownDOStub(c.env, params.townId);
+ const result = await town.convoyRemoveBead(params.convoyId, parsed.data.bead_id);
+ return c.json(resSuccess(result));
+}
+
const MayorUiActionBody = z.object({
action: UiActionSchema,
});
diff --git a/services/gastown/src/prompts/mayor-system.prompt.ts b/services/gastown/src/prompts/mayor-system.prompt.ts
index 78c3b6d347..b0bc7d8e1d 100644
--- a/services/gastown/src/prompts/mayor-system.prompt.ts
+++ b/services/gastown/src/prompts/mayor-system.prompt.ts
@@ -214,7 +214,7 @@ You can directly edit town state when things go wrong:
- **gt_agent_reset** to force-reset a stuck agent to idle
- **gt_convoy_close** to force-close a stuck convoy
- **gt_convoy_update** to edit convoy merge_mode or feature_branch
-- **gt_bead_delete** to remove beads that shouldn't exist
+- **gt_bead_delete** to remove beads that shouldn't exist — accepts a single UUID or an array of UUIDs to bulk-delete up to 5000 at once
- **gt_escalation_acknowledge** to acknowledge escalations
Use these tools when the user reports stuck state, when you detect problems during delegation, or when you need to clean up after failures. You are the town coordinator — you have full authority over the control plane.
diff --git a/services/gastown/src/prompts/polecat-system.prompt.ts b/services/gastown/src/prompts/polecat-system.prompt.ts
index 8815dea0e0..6e6cba4c35 100644
--- a/services/gastown/src/prompts/polecat-system.prompt.ts
+++ b/services/gastown/src/prompts/polecat-system.prompt.ts
@@ -82,6 +82,31 @@ After all gates pass and your work is complete, create a pull request before cal
`
: ''
}
+## PR Conflict Resolution Workflow
+
+When your hooked bead has the \`gt:pr-conflict\` label, **or** when it has the \`gt:pr-feedback\` label and \`pr_conflict_context\` is present in your context, you are resolving merge conflicts on an existing PR branch. **This is an exception to the "do not switch branches" rule.** You MUST check out the PR branch from your bead metadata (\`pr_conflict_context.branch\`).
+
+1. Check out the PR branch: \`git fetch origin && git checkout <branch>\`
+2. Rebase onto the target branch to incorporate its latest changes:
+ \`\`\`
+ git rebase origin/<target-branch>
+ \`\`\`
+3. If there are conflicts during rebase, resolve them:
+ - Edit conflicting files to resolve conflict markers (\`<<<<<<<\`, \`=======\`, \`>>>>>>>\`)
+ - Stage the resolved files: \`git add <files>\`
+ - Continue the rebase: \`git rebase --continue\`
+ - Repeat until the rebase completes
+4. Push the rebased branch:
+ \`\`\`
+ git push --force-with-lease origin <branch>
+ \`\`\`
+5. If the bead metadata has \`has_feedback: true\`, also address the PR review feedback (see PR Fixup Workflow below) before calling gt_done.
+6. Call \`gt_done\` with both required arguments once all conflicts are resolved (and feedback addressed if applicable):
+ - \`pr_url\`: the PR URL from \`pr_conflict_context.pr_url\`
+ - \`branch\`: the branch name from \`pr_conflict_context.branch\`
+
+Do NOT create a new PR. Push to the existing branch.
+
## PR Fixup Workflow
When your hooked bead has the \`gt:pr-fixup\` label, you are fixing an existing PR rather than creating new work. **This is the ONE exception to the "do not switch branches" rule.** You MUST check out the PR branch from your bead metadata instead of using the default worktree branch.
@@ -101,7 +126,7 @@ Do NOT create a new PR. Push to the existing branch.
- Commit after every meaningful unit of work (new function, passing test, config change).
- Push after every commit. Do not batch pushes.
- Use descriptive commit messages referencing the bead if applicable.
-- Branch naming: your branch is pre-configured in your worktree. Do not switch branches — **unless** your bead has the \`gt:pr-fixup\` label (see PR Fixup Workflow above).
+- Branch naming: your branch is pre-configured in your worktree. Do not switch branches — **unless** your bead has the \`gt:pr-fixup\` or \`gt:pr-conflict\` label (see workflows above).
## Escalation
diff --git a/services/gastown/src/trpc/router.ts b/services/gastown/src/trpc/router.ts
index 12aab73298..1b446d2790 100644
--- a/services/gastown/src/trpc/router.ts
+++ b/services/gastown/src/trpc/router.ts
@@ -650,14 +650,21 @@ export const gastownRouter = router({
.input(
z.object({
rigId: z.string().uuid(),
- beadId: z.string().uuid(),
+ beadId: z.union([z.string().uuid(), z.array(z.string().uuid())]),
townId: z.string().uuid().optional(),
})
)
+ .output(z.object({ deleted: z.number() }))
.mutation(async ({ ctx, input }) => {
const rig = await verifyRigOwnership(ctx.env, ctx, input.rigId, input.townId);
const townStub = getTownDOStub(ctx.env, rig.town_id);
- await townStub.deleteBead(input.beadId);
+ const ids = Array.isArray(input.beadId) ? input.beadId : [input.beadId];
+ if (ids.length === 1) {
+ await townStub.deleteBead(ids[0]);
+ return { deleted: 1 };
+ }
+ const count = await townStub.deleteBeads(ids);
+ return { deleted: count };
}),
updateBead: gastownProcedure
@@ -675,6 +682,7 @@ export const gastownRouter = router({
metadata: z.record(z.string(), z.unknown()).optional(),
rig_id: z.string().min(1).nullable().optional(),
parent_bead_id: z.string().min(1).nullable().optional(),
+ depends_on: z.array(z.string().uuid()).optional(),
})
.refine(
data =>
@@ -685,7 +693,8 @@ export const gastownRouter = router({
data.labels !== undefined ||
data.metadata !== undefined ||
data.rig_id !== undefined ||
- data.parent_bead_id !== undefined,
+ data.parent_bead_id !== undefined ||
+ data.depends_on !== undefined,
{ message: 'At least one field to update must be provided' }
)
)
@@ -707,6 +716,56 @@ export const gastownRouter = router({
return townStub.updateBead(beadId, fields, ctx.userId);
}),
+ convoyAddBead: gastownProcedure
+ .input(
+ z.object({
+ townId: z.string().uuid(),
+ convoyId: z.string().uuid(),
+ beadId: z.string().uuid(),
+ depends_on: z.array(z.string().uuid()).optional(),
+ })
+ )
+ .output(z.object({ total_beads: z.number() }))
+ .mutation(async ({ ctx, input }) => {
+ await verifyTownOwnership(ctx.env, ctx, input.townId);
+ const townStub = getTownDOStub(ctx.env, input.townId);
+ return townStub.convoyAddBead(input.convoyId, input.beadId, input.depends_on);
+ }),
+
+ convoyRemoveBead: gastownProcedure
+ .input(
+ z.object({
+ townId: z.string().uuid(),
+ convoyId: z.string().uuid(),
+ beadId: z.string().uuid(),
+ })
+ )
+ .output(z.object({ total_beads: z.number() }))
+ .mutation(async ({ ctx, input }) => {
+ await verifyTownOwnership(ctx.env, ctx, input.townId);
+ const townStub = getTownDOStub(ctx.env, input.townId);
+ return townStub.convoyRemoveBead(input.convoyId, input.beadId);
+ }),
+
+ deleteBeadsByStatus: gastownProcedure
+ .input(
+ z.object({
+ rigId: z.string().uuid(),
+ status: z.enum(['open', 'in_progress', 'in_review', 'closed', 'failed']),
+ type: z
+ .enum(['issue', 'message', 'escalation', 'merge_request', 'convoy', 'molecule', 'agent'])
+ .optional(),
+ townId: z.string().uuid().optional(),
+ })
+ )
+ .output(z.object({ deleted: z.number() }))
+ .mutation(async ({ ctx, input }) => {
+ const rig = await verifyRigOwnership(ctx.env, ctx, input.rigId, input.townId);
+ const townStub = getTownDOStub(ctx.env, rig.town_id);
+ const count = await townStub.deleteBeadsByStatus(input.status, input.type, rig.id);
+ return { deleted: count };
+ }),
+
// ── Agents ──────────────────────────────────────────────────────────
listAgents: gastownProcedure
@@ -1592,6 +1651,37 @@ export const gastownRouter = router({
return townStub.getBeadAsync(input.beadId);
}),
+ adminBulkDeleteBeads: adminProcedure
+ .input(
+ z.object({
+ townId: z.string().uuid(),
+ beadIds: z.array(z.string().uuid()),
+ })
+ )
+ .output(z.object({ deleted: z.number() }))
+ .mutation(async ({ ctx, input }) => {
+ const townStub = getTownDOStub(ctx.env, input.townId);
+ const count = await townStub.deleteBeads(input.beadIds);
+ return { deleted: count };
+ }),
+
+ adminDeleteBeadsByStatus: adminProcedure
+ .input(
+ z.object({
+ townId: z.string().uuid(),
+ status: z.enum(['open', 'in_progress', 'in_review', 'closed', 'failed']),
+ type: z
+ .enum(['issue', 'message', 'escalation', 'merge_request', 'convoy', 'molecule', 'agent'])
+ .optional(),
+ })
+ )
+ .output(z.object({ deleted: z.number() }))
+ .mutation(async ({ ctx, input }) => {
+ const townStub = getTownDOStub(ctx.env, input.townId);
+ const count = await townStub.deleteBeadsByStatus(input.status, input.type);
+ return { deleted: count };
+ }),
+
// DEBUG: raw agent_metadata dump — remove after debugging
debugAgentMetadata: adminProcedure
.input(z.object({ townId: z.string().uuid() }))
diff --git a/services/gastown/src/types.ts b/services/gastown/src/types.ts
index 7579a4810d..73b7bf604b 100644
--- a/services/gastown/src/types.ts
+++ b/services/gastown/src/types.ts
@@ -177,6 +177,14 @@ export type PrimeContext = {
branch: string | null;
target_branch: string | null;
} | null;
+ /** Present when the hooked bead is a PR conflict resolution (gt:pr-conflict label). */
+ pr_conflict_context: {
+ pr_url: string | null;
+ branch: string | null;
+ target_branch: string | null;
+ /** When true, the bead also has pending review feedback to address after resolving conflicts. */
+ has_feedback: boolean;
+ } | null;
};
// -- Agent done --
@@ -275,6 +283,9 @@ export const TownConfigSchema = z.object({
/** When enabled, a polecat is automatically dispatched to address
* unresolved review comments and failing CI checks on open PRs. */
auto_resolve_pr_feedback: z.boolean().default(false),
+ /** When enabled, a polecat is automatically dispatched to rebase and
+ * resolve merge conflicts on open PRs. */
+ auto_resolve_merge_conflicts: z.boolean().default(true).optional(),
/** After all CI checks pass and all review threads are resolved,
* automatically merge the PR after this many minutes.
* 0 = immediate, null = disabled (require manual merge). */
@@ -347,6 +358,7 @@ export const RigOverrideConfigSchema = z.object({
/** false = skip refinery entirely */
code_review: z.boolean().optional(),
auto_resolve_pr_feedback: z.boolean().optional(),
+ auto_resolve_merge_conflicts: z.boolean().optional(),
auto_merge_delay_minutes: z.number().int().min(0).nullable().optional(),
// Merge strategy
@@ -412,6 +424,7 @@ export const TownConfigUpdateSchema = z.object({
code_review: z.boolean().optional(),
review_mode: z.enum(['rework', 'comments']).optional(),
auto_resolve_pr_feedback: z.boolean().optional(),
+ auto_resolve_merge_conflicts: z.boolean().optional(),
auto_merge_delay_minutes: z.number().int().min(0).nullable().optional(),
})
.optional(),
diff --git a/services/gastown/src/util/analytics.util.ts b/services/gastown/src/util/analytics.util.ts
index 0a69c76c82..b796764693 100644
--- a/services/gastown/src/util/analytics.util.ts
+++ b/services/gastown/src/util/analytics.util.ts
@@ -44,6 +44,11 @@ export type GastownEventData = {
durationMs?: number;
value?: number;
label?: string;
+ // Container cold-start instrumentation fields.
+ // Use durationMs for timing (event name disambiguates the metric).
+ // Use error (absence = success) instead of a wasSuccess boolean.
+ statusCode?: number;
+ containerStartedAt?: string;
// Additional doubles for reconciler_tick events (double3–double10).
// Analytics Engine supports up to 20 doubles per data point.
double3?: number;
@@ -82,6 +87,7 @@ export function writeEvent(
data.role ?? '', // blob12
data.beadType ?? '', // blob13
data.reason ?? '', // blob14
+ data.containerStartedAt ?? '', // blob15
],
doubles: [
data.durationMs ?? 0, // double1
@@ -94,6 +100,7 @@ export function writeEvent(
data.double8 ?? 0, // double8
data.double9 ?? 0, // double9
data.double10 ?? 0, // double10
+ data.statusCode ?? 0, // double11
],
indexes: [data.event],
});
diff --git a/services/gastown/src/util/platform-pr.util.ts b/services/gastown/src/util/platform-pr.util.ts
index c322bae5fa..c05bff3431 100644
--- a/services/gastown/src/util/platform-pr.util.ts
+++ b/services/gastown/src/util/platform-pr.util.ts
@@ -174,6 +174,8 @@ ${diffSection}
export const GitHubPRStatusSchema = z.object({
state: z.string(),
merged: z.boolean().optional(),
+ mergeable: z.boolean().nullable().optional(),
+ mergeable_state: z.string().optional(), // 'clean', 'dirty', 'blocked', 'unknown', 'unstable'
});
/** Schema for GitLab MR status responses (used by checkPRStatus). */
diff --git a/services/gastown/test/integration/convoy-dag.test.ts b/services/gastown/test/integration/convoy-dag.test.ts
index e9d22cbc86..0d23192bf9 100644
--- a/services/gastown/test/integration/convoy-dag.test.ts
+++ b/services/gastown/test/integration/convoy-dag.test.ts
@@ -576,4 +576,114 @@ describe('Convoy DAG and Feature Branches', () => {
expect(mrBead?.metadata?.convoy_id).toBe(result.convoy.id);
});
});
+
+ // ── convoyAddBead: staged guard ─────────────────────────────────────
+
+ describe('convoyAddBead: staged convoy guard', () => {
+ it('should insert tracks row with correct direction (bead_id=task, depends_on_bead_id=convoy)', async () => {
+ await town.addRig({
+ rigId: 'rig-1',
+ name: 'main-rig',
+ gitUrl: 'https://github.com/test/repo.git',
+ defaultBranch: 'main',
+ });
+
+ // Create a staged convoy with one initial task
+ const convoy = await town.slingConvoy({
+ rigId: 'rig-1',
+ convoyTitle: 'Staged Add Bead',
+ tasks: [{ title: 'Initial task' }],
+ staged: true,
+ });
+
+ // Create an extra bead to add to the convoy
+ const newBead = await town.createBead({
+ type: 'issue',
+ title: 'Extra task',
+ rig_id: 'rig-1',
+ });
+
+ await town.convoyAddBead(convoy.convoy.id, newBead.bead_id);
+
+ // Verify via getConvoyStatus: the new bead should appear in tracked beads
+ const status = await town.getConvoyStatus(convoy.convoy.id);
+ expect(status).toBeTruthy();
+ const trackedBeadIds = status!.beads.map(b => b.bead_id);
+ expect(trackedBeadIds).toContain(newBead.bead_id);
+ });
+
+ it('should hold newly added bead unassigned while convoy is staged', async () => {
+ await town.addRig({
+ rigId: 'rig-1',
+ name: 'main-rig',
+ gitUrl: 'https://github.com/test/repo.git',
+ defaultBranch: 'main',
+ });
+
+ // Create a staged convoy
+ const convoy = await town.slingConvoy({
+ rigId: 'rig-1',
+ convoyTitle: 'Staged Hold Test',
+ tasks: [{ title: 'Placeholder' }],
+ staged: true,
+ });
+
+ // Create and add a bead to the staged convoy
+ const newBead = await town.createBead({
+ type: 'issue',
+ title: 'Held bead',
+ rig_id: 'rig-1',
+ });
+
+ await town.convoyAddBead(convoy.convoy.id, newBead.bead_id);
+
+ // Run the reconciler — the bead must NOT be dispatched because the convoy is staged
+ await runDurableObjectAlarm(town);
+
+ const bead = await town.getBeadAsync(newBead.bead_id);
+ expect(bead?.status).toBe('open');
+ expect(bead?.assignee_agent_bead_id).toBeNull();
+ });
+
+ it('should dispatch newly added bead after convoy is started', async () => {
+ await town.addRig({
+ rigId: 'rig-1',
+ name: 'main-rig',
+ gitUrl: 'https://github.com/test/repo.git',
+ defaultBranch: 'main',
+ });
+
+ // Create a staged convoy
+ const convoy = await town.slingConvoy({
+ rigId: 'rig-1',
+ convoyTitle: 'Start After Add',
+ tasks: [{ title: 'Placeholder' }],
+ staged: true,
+ });
+
+ // Add a bead to the staged convoy
+ const newBead = await town.createBead({
+ type: 'issue',
+ title: 'Will be dispatched after start',
+ rig_id: 'rig-1',
+ });
+
+ await town.convoyAddBead(convoy.convoy.id, newBead.bead_id);
+
+ // Start the convoy — this clears staged=1
+ await town.startConvoy(convoy.convoy.id);
+
+ // Run the reconciler — bead should now be dispatched
+ await runDurableObjectAlarm(town);
+
+ const bead = await town.getBeadAsync(newBead.bead_id);
+ expect(bead?.assignee_agent_bead_id).toBeTruthy();
+ });
+
+ // Note: testing that convoyAddBead rejects non-staged or closed convoys
+ // cannot be done in workers integration tests — DO exceptions corrupt the
+ // vitest-pool-workers isolated storage stack frame (same limitation as
+ // cycle detection tests above). The validation is exercised by unit tests
+ // in test/unit/convoy-add-bead-validation.test.ts instead.
+ });
});
diff --git a/services/gastown/test/integration/review-failure.test.ts b/services/gastown/test/integration/review-failure.test.ts
index d5b7773c00..8fcb07cb0e 100644
--- a/services/gastown/test/integration/review-failure.test.ts
+++ b/services/gastown/test/integration/review-failure.test.ts
@@ -182,29 +182,6 @@ describe('Review failure paths — convoy progress and source bead recovery', ()
});
});
- // ── Direct completeReview leaves source bead orphaned (regression) ─
-
- describe('completeReview bypass (regression guard)', () => {
- it('should leave source bead stuck in in_review when completeReview is called directly', async () => {
- const { beadId, mrBeadId } = await setupConvoyWithMR();
-
- // Call completeReview directly (the OLD broken path) —
- // this is the raw SQL update that bypasses lifecycle events.
- // We use this to verify the regression scenario.
- await town.completeReview(mrBeadId, 'failed');
-
- // MR bead should be failed
- const mrBead = await town.getBeadAsync(mrBeadId);
- expect(mrBead?.status).toBe('failed');
-
- // Source bead is STILL in_review — this is the bug this PR fixes
- // in processReviewQueue. The direct completeReview call doesn't
- // return the source bead to in_progress.
- const sourceBead = await town.getBeadAsync(beadId);
- expect(sourceBead?.status).toBe('in_review');
- });
- });
-
// ── Source bead in_review after agentDone ──────────────────────────
describe('agentDone transitions source bead to in_review', () => {
diff --git a/services/gastown/test/integration/rig-alarm.test.ts b/services/gastown/test/integration/rig-alarm.test.ts
index a80cfc6b5f..1ec79e6962 100644
--- a/services/gastown/test/integration/rig-alarm.test.ts
+++ b/services/gastown/test/integration/rig-alarm.test.ts
@@ -158,9 +158,10 @@ describe('Town DO Alarm', () => {
// fail gracefully and mark the review as 'failed'
await runDurableObjectAlarm(town);
- // The pending entry should have been popped (no more pending entries)
- const nextEntry = await town.popReviewQueue();
- expect(nextEntry).toBeNull();
+ // The MR bead should no longer be open (alarm processed it)
+ const mrBeads = await town.listBeads({ type: 'merge_request' });
+ expect(mrBeads).toHaveLength(1);
+ expect(mrBeads[0].status).not.toBe('open');
});
});
@@ -293,9 +294,10 @@ describe('Town DO Alarm', () => {
// (will fail at container level but that's expected in tests)
await runDurableObjectAlarm(town);
- // Review queue entry should have been popped and processed (failed in test env)
- const reviewEntry = await town.popReviewQueue();
- expect(reviewEntry).toBeNull();
+ // MR bead should have been picked up and processed (failed in test env)
+ const mrBeads = await town.listBeads({ type: 'merge_request' });
+ expect(mrBeads).toHaveLength(1);
+ expect(mrBeads[0].status).not.toBe('open');
});
});
});
diff --git a/services/gastown/test/integration/rig-do.test.ts b/services/gastown/test/integration/rig-do.test.ts
index eb22196fd0..221f5bce67 100644
--- a/services/gastown/test/integration/rig-do.test.ts
+++ b/services/gastown/test/integration/rig-do.test.ts
@@ -356,7 +356,7 @@ describe('TownDO', () => {
// ── Review Queue ───────────────────────────────────────────────────────
describe('review queue', () => {
- it('should submit to and pop from review queue', async () => {
+ it('should submit to review queue and create an open merge_request bead', async () => {
const agent = await town.registerAgent({
role: 'polecat',
name: 'P1',
@@ -373,40 +373,12 @@ describe('TownDO', () => {
summary: 'Fixed the widget',
});
- const entry = await town.popReviewQueue();
- expect(entry).toBeDefined();
- expect(entry?.branch).toBe('feature/fix-widget');
- expect(entry?.pr_url).toBe('https://github.com/org/repo/pull/1');
- expect(entry?.status).toBe('running');
-
- // Pop again should return null (nothing pending)
- const empty = await town.popReviewQueue();
- expect(empty).toBeNull();
- });
-
- it('should complete a review', async () => {
- const agent = await town.registerAgent({
- role: 'polecat',
- name: 'P1',
- identity: `complete-review-${townName}`,
- });
- const bead = await town.createBead({ type: 'issue', title: 'Review complete' });
-
- await town.submitToReviewQueue({
- agent_id: agent.id,
- bead_id: bead.bead_id,
- rig_id: 'test-rig',
- branch: 'feature/fix',
- });
-
- const entry = await town.popReviewQueue();
- expect(entry).toBeDefined();
-
- await town.completeReview(entry!.id, 'merged');
-
- // Pop again should be null
- const empty = await town.popReviewQueue();
- expect(empty).toBeNull();
+ // submitToReviewQueue creates an open merge_request bead
+ const mrBeads = await town.listBeads({ type: 'merge_request' });
+ expect(mrBeads).toHaveLength(1);
+ expect(mrBeads[0].status).toBe('open');
+ expect(mrBeads[0].metadata?.pr_url).toBe('https://github.com/org/repo/pull/1');
+ expect(mrBeads[0].metadata?.source_bead_id).toBe(bead.bead_id);
});
it('should close bead on successful merge via completeReviewWithResult', async () => {
@@ -424,11 +396,12 @@ describe('TownDO', () => {
branch: 'feature/merge-test',
});
- const entry = await town.popReviewQueue();
- expect(entry).toBeDefined();
+ const mrBeads = await town.listBeads({ type: 'merge_request' });
+ expect(mrBeads).toHaveLength(1);
+ const mrBeadId = mrBeads[0].bead_id;
await town.completeReviewWithResult({
- entry_id: entry!.id,
+ entry_id: mrBeadId,
status: 'merged',
message: 'Merge successful',
commit_sha: 'abc123',
@@ -439,9 +412,9 @@ describe('TownDO', () => {
expect(updatedBead?.status).toBe('closed');
expect(updatedBead?.closed_at).toBeDefined();
- // Review queue should be empty
- const empty = await town.popReviewQueue();
- expect(empty).toBeNull();
+ // MR bead should be closed
+ const updatedMr = await town.getBeadAsync(mrBeadId);
+ expect(updatedMr?.status).toBe('closed');
});
it('should create escalation bead on merge conflict via completeReviewWithResult', async () => {
@@ -459,11 +432,12 @@ describe('TownDO', () => {
branch: 'feature/conflict-test',
});
- const entry = await town.popReviewQueue();
- expect(entry).toBeDefined();
+ const mrBeads = await town.listBeads({ type: 'merge_request' });
+ expect(mrBeads).toHaveLength(1);
+ const mrBeadId = mrBeads[0].bead_id;
await town.completeReviewWithResult({
- entry_id: entry!.id,
+ entry_id: mrBeadId,
status: 'conflict',
message: 'CONFLICT (content): Merge conflict in src/index.ts',
});
@@ -484,9 +458,9 @@ describe('TownDO', () => {
agent_id: agent.id,
});
- // Review queue entry should be marked as failed
- const empty = await town.popReviewQueue();
- expect(empty).toBeNull();
+ // MR bead should be marked as failed
+ const updatedMr = await town.getBeadAsync(mrBeadId);
+ expect(updatedMr?.status).toBe('failed');
});
});
diff --git a/services/gastown/test/unit/convoy-add-bead-validation.test.ts b/services/gastown/test/unit/convoy-add-bead-validation.test.ts
new file mode 100644
index 0000000000..0b4a4f66bb
--- /dev/null
+++ b/services/gastown/test/unit/convoy-add-bead-validation.test.ts
@@ -0,0 +1,77 @@
+/**
+ * Unit tests for convoyAddBead validation rules.
+ *
+ * These test the guard conditions from Town.do.ts::convoyAddBead.
+ * Integration tests for the happy path (bead held during staged, dispatched
+ * after start) live in test/integration/convoy-dag.test.ts.
+ *
+ * DO exceptions corrupt vitest-pool-workers isolated storage, so rejection
+ * tests must live here as pure unit tests.
+ */
+import { describe, it, expect } from 'vitest';
+
+type ConvoyStatus = 'active' | 'landed';
+type BeadStatus = 'open' | 'in_progress' | 'in_review' | 'closed' | 'failed';
+
+type ConvoyEntry = {
+ id: string;
+ staged: boolean;
+ status: ConvoyStatus;
+};
+
+/**
+ * Pure reimplementation of the convoyAddBead validation from Town.do.ts.
+ * Kept in sync with:
+ * Town.do.ts::convoyAddBead (lines ~1220-1225)
+ *
+ * The rawStatus parameter mirrors convoyRecord.status (the actual bead status
+ * before mapping to ConvoyEntry.status). This is needed because toConvoy()
+ * collapses 'failed' → 'active', so the ConvoyEntry.status check alone would
+ * not block failed convoys.
+ */
+function validateConvoyAddBead(convoy: ConvoyEntry | null, rawStatus?: BeadStatus): void {
+ if (!convoy) throw new Error(`Bead is not a convoy`);
+ if (!convoy.staged) throw new Error(`Cannot add beads to a non-staged convoy: ${convoy.id}`);
+ if (convoy.status === 'landed')
+ throw new Error(`Cannot add beads to a closed convoy: ${convoy.id}`);
+ if (rawStatus === 'failed') throw new Error(`Cannot add beads to a failed convoy: ${convoy.id}`);
+}
+
+describe('convoyAddBead validation', () => {
+ it('allows adding to a staged active convoy', () => {
+ expect(() =>
+ validateConvoyAddBead({ id: 'convoy-1', staged: true, status: 'active' }, 'in_progress')
+ ).not.toThrow();
+ });
+
+ it('rejects null convoy (not found)', () => {
+ expect(() => validateConvoyAddBead(null)).toThrow(/not a convoy/);
+ });
+
+ it('rejects non-staged (active) convoy', () => {
+ expect(() =>
+ validateConvoyAddBead({ id: 'convoy-1', staged: false, status: 'active' }, 'open')
+ ).toThrow(/non-staged/);
+ });
+
+ it('rejects staged but landed convoy', () => {
+ // Landed convoys have staged=false in practice, but guard against status too
+ expect(() =>
+ validateConvoyAddBead({ id: 'convoy-1', staged: true, status: 'landed' }, 'closed')
+ ).toThrow(/closed/);
+ });
+
+ it('rejects non-staged landed convoy', () => {
+ expect(() =>
+ validateConvoyAddBead({ id: 'convoy-1', staged: false, status: 'landed' }, 'closed')
+ ).toThrow(/non-staged/);
+ });
+
+ it('rejects failed convoy (toConvoy maps failed → active, raw check required)', () => {
+ // toConvoy() maps 'failed' bead status to ConvoyEntry.status='active',
+ // so without a raw status check the guard would pass a failed convoy.
+ expect(() =>
+ validateConvoyAddBead({ id: 'convoy-1', staged: true, status: 'active' }, 'failed')
+ ).toThrow(/failed convoy/);
+ });
+});
diff --git a/services/gastown/wrangler.jsonc b/services/gastown/wrangler.jsonc
index b3fc1fac2e..4cc1410810 100644
--- a/services/gastown/wrangler.jsonc
+++ b/services/gastown/wrangler.jsonc
@@ -37,7 +37,7 @@
"class_name": "TownContainerDO",
"image": "./container/Dockerfile",
"instance_type": "standard-4",
- "max_instances": 700,
+ "max_instances": 800,
},
],
|