|
# Workflow: retry of the ContextBench readiness gate for the
# codebase-memory-mcp lane (first N benchmark tasks only).
name: ContextBench CBM Readiness Retry

on:
  push:
    branches: [master]
    # Only re-run automatically when this workflow definition itself changes.
    paths:
      - .github/workflows/contextbench-cbm-readiness-retry.yml
  workflow_dispatch:
    inputs:
      max_tasks:
        description: 'Number of first tasks to run for codebase-memory readiness'
        required: true
        default: '3'
      codebase_memory_version:
        description: 'codebase-memory-mcp release tag'
        required: true
        default: 'v0.6.1'

# Read-only token: the job never writes back to the repository.
permissions:
  contents: read

jobs:
  codebase-memory-first3-readiness:
    runs-on: ubuntu-latest
    # Generous ceiling: a single repository index run alone is allowed
    # up to 45 minutes (see the readiness script below).
    timeout-minutes: 360
    env:
      ROOT: /tmp/contextbench-cbm-readiness
      TASK_PAYLOADS: /tmp/contextbench-cbm-readiness/task-payloads.json
      CHECKOUT_ROOT: /tmp/contextbench-checkouts
      # Fall back to the dispatch defaults when triggered by push
      # (push events carry no workflow_dispatch inputs).
      CBM_VERSION: ${{ github.event.inputs.codebase_memory_version || 'v0.6.1' }}
      MAX_TASKS: ${{ github.event.inputs.max_tasks || '3' }}
    steps:
    - uses: actions/checkout@v4
    # NOTE(review): pnpm/action-setup@v2 is an older major — consider
    # upgrading; verify the pinned pnpm major (10) still matches the lockfile.
    - uses: pnpm/action-setup@v2
      with:
        version: 10
    - uses: actions/setup-node@v4
      with:
        node-version: '24'
        cache: 'pnpm'
    # Python is required by the official ContextBench evaluator invoked below.
    - uses: actions/setup-python@v5
      with:
        python-version: '3.11'
    - name: Install dependencies
      run: |
        pnpm install --frozen-lockfile
        # Pinned tree-sitter versions; presumably what the evaluator's parsing
        # stack expects — confirm against ContextBench requirements.
        python -m pip install "tree-sitter==0.20.4" "tree-sitter-languages==1.10.2" datasets pyarrow
    - name: Validate fixtures and materialize first tasks
      run: |
        mkdir -p "$ROOT" "$CHECKOUT_ROOT"
        node scripts/contextbench-runner.mjs --validate-fixtures
        # Write per-task payloads first, then check out only the first
        # $MAX_TASKS repositories referenced by those payloads.
        node scripts/contextbench-select-slice.mjs --write-task-payloads --out "$TASK_PAYLOADS" --checkout-root "$CHECKOUT_ROOT"
        node scripts/contextbench-select-slice.mjs --materialize-checkouts --payloads "$TASK_PAYLOADS" --max-tasks "$MAX_TASKS"
    - name: Download codebase-memory-mcp
      run: |
        set -euxo pipefail
        mkdir -p "$ROOT/tool"
        curl -fsSL "https://github.com/DeusData/codebase-memory-mcp/releases/download/${CBM_VERSION}/codebase-memory-mcp-linux-amd64.tar.gz" -o "$ROOT/tool/cbm.tar.gz"
        tar -xzf "$ROOT/tool/cbm.tar.gz" -C "$ROOT/tool"
        # Best effort: the archive may already ship the executable bit.
        chmod +x "$ROOT/tool/codebase-memory-mcp" || true
        # Smoke-print the version; never fail the job on this probe.
        "$ROOT/tool/codebase-memory-mcp" --version || true
    - name: Run readiness gate with official evaluator
      env:
        # NOTE(review): duplicates the job-level $ROOT prefix; could be
        # derived from it to avoid drift if ROOT ever changes.
        CBM_BIN: /tmp/contextbench-cbm-readiness/tool/codebase-memory-mcp
      run: |
        # Emit the gate script via a quoted heredoc ('NODE') so that `$`
        # and backticks inside the JavaScript are taken literally by bash.
        cat > "$ROOT/readiness.mjs" <<'NODE'
        import { spawnSync } from 'node:child_process';
        import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs';
        import { join } from 'node:path';

        // Working paths and task inputs are injected via the job-level env.
        const root = process.env.ROOT;
        const payloads = JSON.parse(readFileSync(process.env.TASK_PAYLOADS, 'utf8'));
        // The readiness gate only exercises the first MAX_TASKS tasks (default 3).
        const tasks = payloads.tasks.slice(0, Number(process.env.MAX_TASKS || '3'));
        const cbm = process.env.CBM_BIN;
        const outRoot = join(root, 'out');
        mkdirSync(outRoot, { recursive: true });

| 78 | + function run(cmd, args, opts = {}) { |
| 79 | + const started = Date.now(); |
| 80 | + const r = spawnSync(cmd, args, { |
| 81 | + cwd: opts.cwd || process.cwd(), |
| 82 | + env: opts.env || process.env, |
| 83 | + encoding: 'utf8', |
| 84 | + timeout: opts.timeoutMs || 20 * 60 * 1000, |
| 85 | + maxBuffer: 64 * 1024 * 1024 |
| 86 | + }); |
| 87 | + return { command: [cmd, ...args].join(' '), cwd: opts.cwd || process.cwd(), status: r.status, signal: r.signal, error: r.error?.message || null, durationMs: Date.now() - started, stdout: r.stdout || '', stderr: r.stderr || '' }; |
| 88 | + } |
| 89 | +
|
| 90 | + function firstOk(label, candidates, opts) { |
| 91 | + const attempts = []; |
| 92 | + for (const args of candidates) { |
| 93 | + const attempt = run(cbm, args, opts); |
| 94 | + attempts.push(attempt); |
| 95 | + if (attempt.status === 0) return { ...attempt, label, attempts }; |
| 96 | + } |
| 97 | + return { ...(attempts.at(-1) || {}), label, attempts }; |
| 98 | + } |
| 99 | +
|
| 100 | + function queryOf(text) { |
| 101 | + return String(text || '').replace(/[`*_#>\[\](){},.;:!?/\\]/g, ' ').split(/\s+/).filter((w) => w.length >= 4).slice(0, 8).join(' '); |
| 102 | + } |
| 103 | +
|
| 104 | + function jsonish(s) { |
| 105 | + const t = String(s || '').trim(); |
| 106 | + if (!t) return null; |
| 107 | + try { return JSON.parse(t); } catch {} |
| 108 | + for (const [a, b] of [['{', '}'], ['[', ']']]) { |
| 109 | + const i = t.indexOf(a), j = t.lastIndexOf(b); |
| 110 | + if (i >= 0 && j > i) { try { return JSON.parse(t.slice(i, j + 1)); } catch {} } |
| 111 | + } |
| 112 | + return null; |
| 113 | + } |
| 114 | +
|
| 115 | + function add(spans, file, start = 1, end = start) { |
| 116 | + if (typeof file !== 'string' || !file) return; |
| 117 | + const clean = file.replace(/^\/+/, ''); |
| 118 | + const s = Math.max(1, Number(start) || 1); |
| 119 | + const e = Math.max(s, Number(end) || s); |
| 120 | + const list = spans.get(clean) || []; |
| 121 | + list.push({ start: s, end: e }); |
| 122 | + spans.set(clean, list); |
| 123 | + } |
| 124 | +
|
| 125 | + function walk(v, spans) { |
| 126 | + if (!v || typeof v !== 'object') return; |
| 127 | + if (Array.isArray(v)) { for (const x of v) walk(x, spans); return; } |
| 128 | + const file = v.file || v.path || v.file_path || v.relative_path || v.filename || v.source_path; |
| 129 | + const start = v.start_line || v.startLine || v.line || v.line_number || v.start || 1; |
| 130 | + const end = v.end_line || v.endLine || v.end || start; |
| 131 | + add(spans, file, start, end); |
| 132 | + for (const x of Object.values(v)) walk(x, spans); |
| 133 | + } |
| 134 | +
|
| 135 | + function textPaths(s, spans) { |
| 136 | + const re = /([A-Za-z0-9_.\/-]+\.(?:js|jsx|ts|tsx|py|go|rs|java|c|cc|cpp|h|hpp|rb|php|cs|kt|swift|vue|svelte|json|yml|yaml|md))(?::|#L|\s+line\s+)?(\d+)?/g; |
| 137 | + let m; |
| 138 | + while ((m = re.exec(String(s || ''))) !== null) add(spans, m[1], m[2] || 1, m[2] || 1); |
| 139 | + } |
| 140 | +
|
        // Per-task readiness reports; `ready` flips false if any task fails its gate.
        const reports = [];
        let ready = true;
        for (const [i, task] of tasks.entries()) {
          const dir = join(outRoot, `${i + 1}-${task.instance_id}`);
          mkdirSync(dir, { recursive: true });
          // Isolate the tool's cache per task and turn on diagnostics output.
          const env = { ...process.env, CBM_CACHE_DIR: join(dir, 'cbm-cache'), CBM_DIAGNOSTICS: '1' };
          const opts = { cwd: task.repo_checkout_path, env, timeoutMs: 120_000 };
          const query = queryOf(task.problem_statement);
          const firstTerm = query.split(/\s+/)[0] || 'import';
          // Setup probe, then a long-budget full index of the task's checkout.
          const setup = run(cbm, ['--version'], { env, timeoutMs: 60_000 });
          const indexRun = run(cbm, ['cli', 'index_repository', JSON.stringify({ repo_path: '.' })], { ...opts, timeoutMs: 45 * 60 * 1000 });
          // Each query is attempted with alternative argument shapes via firstOk,
          // with progressively broader fallbacks for search_code.
          const listProjects = firstOk('list_projects', [['cli', 'list_projects'], ['cli', 'list_projects', '{}']], opts);
          const graphSchema = firstOk('get_graph_schema', [['cli', 'get_graph_schema'], ['cli', 'get_graph_schema', '{}']], opts);
          const graphSearch = firstOk('search_graph', [['cli', 'search_graph', JSON.stringify({ label: 'Function', limit: 25 })], ['cli', 'search_graph', JSON.stringify({ label: 'Class', limit: 25 })]], opts);
          const codeSearch = firstOk('search_code', [['cli', 'search_code', JSON.stringify({ pattern: query, limit: 25 })], ['cli', 'search_code', JSON.stringify({ pattern: firstTerm, limit: 25 })], ['cli', 'search_code', JSON.stringify({ pattern: '.', limit: 25 })]], opts);

          // Harvest file/line references from all tool output (parsed JSON plus
          // raw-text path scraping) and build a capped prediction payload.
          const spans = new Map();
          for (const r of [listProjects, graphSchema, graphSearch, codeSearch]) for (const text of [r.stdout, r.stderr]) { const parsed = jsonish(text); if (parsed) walk(parsed, spans); textPaths(text, spans); }
          const predFiles = [...spans.keys()].slice(0, 20);
          const predSpans = Object.fromEntries([...spans.entries()].slice(0, 20));
          const predictionPath = join(dir, 'prediction.json');
          writeFileSync(predictionPath, JSON.stringify({ instance_id: task.instance_id, repo_url: task.repo_checkout_path, commit: task.base_commit, traj_data: { pred_steps: [{ files: predFiles, spans: predSpans }], pred_files: predFiles, pred_spans: predSpans }, model_patch: '' }, null, 2));

          // Persist every raw command result for post-mortem debugging.
          for (const [name, result] of Object.entries({ setup, indexRun, listProjects, graphSchema, graphSearch, codeSearch })) writeFileSync(join(dir, `${name}.json`), JSON.stringify(result, null, 2));
          // Materialize the gold answer, clone the official evaluator once
          // (reused across tasks), and score the prediction with it.
          const goldPath = join(dir, 'gold.json');
          const gold = run('node', ['scripts/contextbench-select-slice.mjs', '--write-gold', '--task-id', task.instance_id, '--out', goldPath, '--payloads', process.env.TASK_PAYLOADS], { timeoutMs: 10 * 60 * 1000 });
          const official = join(root, 'ContextBench-official');
          if (!existsSync(join(official, 'contextbench', 'evaluate.py'))) run('git', ['clone', '--depth', '1', 'https://github.com/EuniAI/ContextBench.git', official], { timeoutMs: 10 * 60 * 1000 });
          const scorePath = join(dir, 'official-score.jsonl');
          const evaluator = run('python', ['-m', 'contextbench.evaluate', '--gold', goldPath, '--pred', predictionPath, '--cache', join(dir, 'repo-cache'), '--out', scorePath], { cwd: official, timeoutMs: 20 * 60 * 1000 });
          // Gate per task: binary runs, index succeeds, at least one query is
          // callable, the prediction is non-empty, and the official evaluator
          // produces a score file. laneIsolation records the exact commands
          // and cwds used, for auditability.
          const report = { taskId: task.instance_id, repo: task.repo, setupStatus: setup.status, indexStatus: indexRun.status, toolCallable: [listProjects, graphSchema, graphSearch, codeSearch].some((r) => r.status === 0), nonEmptyPrediction: predFiles.length > 0 && Object.keys(predSpans).length > 0, officialEvaluatorStatus: evaluator.status, officialEvaluatorScoreable: evaluator.status === 0 && existsSync(scorePath), costs: { setupDurationMs: setup.durationMs, indexDurationMs: indexRun.durationMs, queryDurationMs: listProjects.durationMs + graphSchema.durationMs + graphSearch.durationMs + codeSearch.durationMs, evaluatorDurationMs: evaluator.durationMs }, laneIsolation: { allowedTool: 'codebase-memory-mcp', observedCommands: [setup.command, indexRun.command, listProjects.command, graphSchema.command, graphSearch.command, codeSearch.command], observedCwds: [setup.cwd, indexRun.cwd, listProjects.cwd, graphSchema.cwd, graphSearch.cwd, codeSearch.cwd], disallowedNativeReadSearchUsedForPrediction: false }, query, predFiles, commands: { setup, indexRun, listProjects, graphSchema, graphSearch, codeSearch, gold, evaluator } };
          writeFileSync(join(dir, 'readiness-report.json'), JSON.stringify(report, null, 2));
          reports.push(report);
          if (!(report.setupStatus === 0 && report.indexStatus === 0 && report.toolCallable && report.nonEmptyPrediction && report.officialEvaluatorScoreable)) ready = false;
        }
        // The summary drives the job's exit code: non-zero when any task failed.
        const summary = { createdAt: new Date().toISOString(), lane: 'codebase-memory-mcp', ready, attemptedRows: reports.length, scoreableRows: reports.filter((r) => r.officialEvaluatorScoreable).length, nonEmptyPredictionRows: reports.filter((r) => r.nonEmptyPrediction).length, setupIndexCostReportedSeparately: true, reports };
        writeFileSync(join(outRoot, 'lane-readiness-codebase-memory-first3.json'), JSON.stringify(summary, null, 2));
        console.log(JSON.stringify(summary, null, 2));
        if (!ready) process.exitCode = 1;
        NODE
        # Execute the gate; its exit code (via process.exitCode) fails the step.
        node "$ROOT/readiness.mjs"
    # Always upload the per-task reports and raw command output, even when
    # the readiness gate fails — that is when they matter most.
    - name: Upload readiness artifacts
      if: always()
      uses: actions/upload-artifact@v4
      with:
        name: contextbench-cbm-readiness-retry
        path: /tmp/contextbench-cbm-readiness
        retention-days: 14
0 commit comments