Skip to content

Commit d6414b3

Browse files
committed
Add authenticated one-row ContextBench run
1 parent f6e9303 commit d6414b3

1 file changed

Lines changed: 174 additions & 0 deletions

File tree

Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
name: ContextBench Real GPT54 Mini Auth One
2+
3+
on:
4+
push:
5+
branches: [master]
6+
paths:
7+
- .github/workflows/contextbench-real-gpt54mini-auth-one.yml
8+
workflow_dispatch:
9+
10+
permissions:
11+
contents: read
12+
13+
jobs:
14+
cbm-go-auth-one:
15+
runs-on: ubuntu-latest
16+
timeout-minutes: 45
17+
env:
18+
ROOT: /tmp/contextbench-real-gpt54mini-auth-one
19+
TASK_PAYLOADS: /tmp/contextbench-real-gpt54mini-auth-one/task-payloads.json
20+
CHECKOUT_ROOT: /tmp/contextbench-checkouts
21+
OPENAI_MODEL: gpt-5.4-mini
22+
OPENAI_REASONING_EFFORT: high
23+
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY || secrets.CODEX_OPENAI_API_KEY || secrets.OPENAI_KEY || secrets.OPENAI_API_KEY_GPT54 || secrets.CONTEXTBENCH_OPENAI_API_KEY }}
24+
CBM_BIN: /tmp/contextbench-real-gpt54mini-auth-one/tool/codebase-memory-mcp
25+
steps:
26+
- uses: actions/checkout@v4
27+
- uses: pnpm/action-setup@v2
28+
with:
29+
version: 10
30+
- uses: actions/setup-node@v4
31+
with:
32+
node-version: '24'
33+
cache: pnpm
34+
- uses: actions/setup-python@v5
35+
with:
36+
python-version: '3.11'
37+
38+
- name: Check model auth
39+
run: |
40+
if [ -z "$OPENAI_API_KEY" ]; then
41+
echo "::error::Missing model auth. Set one Actions secret: OPENAI_API_KEY, CODEX_OPENAI_API_KEY, OPENAI_KEY, OPENAI_API_KEY_GPT54, or CONTEXTBENCH_OPENAI_API_KEY."
42+
exit 78
43+
fi
44+
45+
- name: Install and materialize frozen Go task
46+
run: |
47+
set -euxo pipefail
48+
mkdir -p "$ROOT" "$CHECKOUT_ROOT" "$ROOT/tool"
49+
pnpm install --frozen-lockfile
50+
python -m pip install "tree-sitter==0.20.4" "tree-sitter-languages==1.10.2" datasets pyarrow
51+
curl -fsSL "https://github.com/DeusData/codebase-memory-mcp/releases/download/v0.6.1/codebase-memory-mcp-linux-amd64.tar.gz" -o "$ROOT/tool/cbm.tar.gz"
52+
tar -xzf "$ROOT/tool/cbm.tar.gz" -C "$ROOT/tool"
53+
chmod +x "$CBM_BIN" || true
54+
git clone --depth 1 https://github.com/EuniAI/ContextBench.git "$ROOT/ContextBench-official"
55+
node scripts/contextbench-runner.mjs --validate-fixtures
56+
node scripts/contextbench-select-slice.mjs --write-task-payloads --out "$TASK_PAYLOADS" --checkout-root "$CHECKOUT_ROOT"
57+
node scripts/contextbench-select-slice.mjs --materialize-checkouts --payloads "$TASK_PAYLOADS" --max-tasks 3
58+
59+
- name: Run authenticated scoreable CBM row
60+
env:
61+
OFFICIAL_CONTEXTBENCH: /tmp/contextbench-real-gpt54mini-auth-one/ContextBench-official
62+
run: |
63+
cat > "$ROOT/run.mjs" <<'NODE'
64+
import { spawnSync } from 'node:child_process';
65+
import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs';
66+
import { basename, join } from 'node:path';
67+
68+
// Root working directory and the frozen task-payload file are injected via
// the workflow's `env:` block (ROOT / TASK_PAYLOADS).
const root = process.env.ROOT;
69+
const payloads = JSON.parse(readFileSync(process.env.TASK_PAYLOADS, 'utf8'));
70+
// Picks the third task of the materialized slice (upstream step runs with
// --max-tasks 3); assumes the payload file holds at least 3 tasks — TODO confirm.
const task = payloads.tasks[2];
71+
// All per-row artifacts (tool output, model I/O, prediction, scores) land here.
const runDir = join(root, 'row-codebase-memory-mcp-go');
72+
mkdirSync(runDir, { recursive: true });
73+
74+
/**
 * Run a command synchronously and capture a structured, JSON-serializable
 * result (used so every tool invocation can be dumped to an artifact file).
 *
 * @param {string} cmd - Executable to invoke.
 * @param {string[]} args - Argument vector.
 * @param {{cwd?: string, env?: object, timeoutMs?: number}} [opts] - Optional overrides.
 * @returns {{command: string, cwd: string, status: number|null, signal: string|null,
 *          error: string|null, durationMs: number, stdout: string, stderr: string}}
 */
function run(cmd, args, opts = {}) {
  const started = Date.now();
  // Hoisted so the spawn and the report are guaranteed to use the same value.
  const cwd = opts.cwd || process.cwd();
  const r = spawnSync(cmd, args, {
    cwd,
    env: opts.env || process.env,
    encoding: 'utf8',
    // `??` (not `||`) so an explicit `timeoutMs: 0` disables the timeout
    // instead of silently falling back to the 10-minute default.
    timeout: opts.timeoutMs ?? 600000,
    maxBuffer: 128 * 1024 * 1024, // indexing/search output can be very large
  });
  return {
    command: [cmd, ...args].join(' '),
    cwd,
    status: r.status,
    signal: r.signal,
    error: r.error?.message || null,
    durationMs: Date.now() - started,
    stdout: r.stdout || '',
    stderr: r.stderr || '',
  };
}
85+
/**
 * Distill free-form problem text into a short search query: strip common
 * punctuation/markdown characters, then keep the first ten words that are at
 * least four characters long.
 */
function queryOf(text) {
  const cleaned = String(text || '').replace(/[`*_#>\[\](){},.;:!?/\\]/g, ' ');
  const words = [];
  for (const word of cleaned.split(/\s+/)) {
    if (word.length >= 4) words.push(word);
    if (words.length === 10) break;
  }
  return words.join(' ');
}
86+
/**
 * Best-effort JSON extraction from noisy tool output: parse the whole string,
 * else the outermost `{...}` span, else the outermost `[...]` span; returns
 * null when nothing parses.
 */
function jsonish(text) {
  const raw = String(text || '').trim();
  if (raw === '') return null;
  const attempts = [raw];
  for (const [open, close] of [['{', '}'], ['[', ']']]) {
    const first = raw.indexOf(open);
    const last = raw.lastIndexOf(close);
    if (first >= 0 && last > first) attempts.push(raw.slice(first, last + 1));
  }
  for (const candidate of attempts) {
    try {
      return JSON.parse(candidate);
    } catch {
      // fall through to the next, narrower candidate
    }
  }
  return null;
}
87+
/** Normalize a path for comparison: drop all leading slashes, then one leading "./". */
function norm(file) {
  let path = String(file || '');
  while (path.startsWith('/')) path = path.slice(1);
  if (path.startsWith('./')) path = path.slice(2);
  return path;
}
88+
/**
 * Append a candidate location to `locs` after sanitizing the path.
 * Empty paths, parent-directory escapes ("..") and URL-looking strings
 * ("://") are rejected; line numbers are clamped to >= 1 with end >= start.
 */
function add(locs, file, start = 1, end = start, source = 'cbm') {
  const clean = norm(file);
  const rejected = !clean || clean.includes('..') || clean.includes('://');
  if (rejected) return;
  const firstLine = Math.max(1, Number(start) || 1);
  const lastLine = Math.max(firstLine, Number(end) || firstLine);
  locs.push({ file: clean, start: firstLine, end: lastLine, source });
}
89+
/**
 * Recursively harvest file/line hints from arbitrary parsed JSON.
 * Arrays are walked element-wise; each object contributes one candidate from
 * its path-ish keys and is then walked value-wise for nested hits.
 */
function walk(value, locs) {
  if (!value || typeof value !== 'object') return;
  if (Array.isArray(value)) {
    value.forEach((item) => walk(item, locs));
    return;
  }
  const pathish =
    value.file ||
    value.path ||
    value.file_path ||
    value.relative_path ||
    value.filename ||
    value.source_path;
  const startLine = value.start_line || value.line || 1;
  const endLine = value.end_line || value.line || 1;
  add(locs, pathish, startLine, endLine);
  for (const nested of Object.values(value)) walk(nested, locs);
}
90+
/**
 * Pull candidate locations out of raw tool output: first from any embedded
 * JSON payload, then from plain-text "path.ext:NN"-style mentions of source
 * files with known extensions.
 */
function collect(text, locs) {
  const parsed = jsonish(text);
  if (parsed) walk(parsed, locs);
  const haystack = String(text || '');
  const mention = /([A-Za-z0-9_.\/-]+\.(?:js|jsx|ts|tsx|py|go|rs|java|c|cc|cpp|h|hpp|rb|php|cs|kt|swift|vue|svelte|json|yml|yaml|md))(?::|#L|\s+line\s+)?(\d+)?/g;
  for (let hit = mention.exec(haystack); hit !== null; hit = mention.exec(haystack)) {
    const [, file, line] = hit;
    add(locs, file, line || 1, line || 1);
  }
}
91+
/** Dedupe locations by file:start:end, preserving first-seen order, capped at 80. */
function uniq(locs) {
  const out = [];
  const seen = new Set();
  for (const loc of locs) {
    const key = [loc.file, loc.start, loc.end].join(':');
    if (seen.has(key)) continue;
    seen.add(key);
    out.push(loc);
    if (out.length >= 80) break;
  }
  return out;
}
92+
/**
 * Record a line span for `file` in a Map<file, spans[]>, normalizing the
 * path and clamping the span to start >= 1 and end >= start.
 */
function addSpan(map, file, start = 1, end = start) {
  const clean = norm(file);
  if (!clean) return;
  const from = Math.max(1, Number(start) || 1);
  const to = Math.max(from, Number(end) || from);
  if (!map.has(clean)) map.set(clean, []);
  map.get(clean).push({ start: from, end: to });
}
93+
94+
/**
 * Ask the OpenAI Responses API to select likely edit locations from the
 * codebase-memory-mcp candidate list.
 *
 * @param {Array<{file: string, start: number, end: number, source: string}>} candidates
 * @param {string} query - Keyword query derived from the problem statement.
 * @returns {Promise<object>} `{ ok: true, parsed: { files, spans, notes }, usage, durationMs }`
 *   on success, or `{ ok: false, status, ... }` describing the failure mode.
 *   Never throws: every error path is converted into a status object.
 */
async function askModel(candidates, query) {
95+
const started = Date.now();
96+
// Lane filter: only files the tool actually surfaced may survive the answer.
const candidateFiles = new Set(candidates.map((c) => c.file));
97+
// Structured-output request: strict JSON schema forces {files, spans, notes}.
const body = {
98+
model: process.env.OPENAI_MODEL,
99+
reasoning: { effort: process.env.OPENAI_REASONING_EFFORT },
100+
max_output_tokens: 1200,
101+
instructions: 'Select likely ContextBench edit locations using only the provided codebase-memory-mcp candidate locations. Return JSON only.',
102+
// Only the first 60 candidates are sent to bound prompt size.
input: JSON.stringify({ taskId: task.instance_id, repo: task.repo, lane: 'codebase-memory-mcp', query, problemStatement: task.problem_statement, candidateLocations: candidates.slice(0, 60) }),
103+
text: { format: { type: 'json_schema', name: 'contextbench_selection', strict: true, schema: { type: 'object', additionalProperties: false, required: ['files','spans','notes'], properties: { files: { type: 'array', maxItems: 20, items: { type: 'string' } }, spans: { type: 'array', maxItems: 40, items: { type: 'object', additionalProperties: false, required: ['file','start','end'], properties: { file: { type: 'string' }, start: { type: 'integer', minimum: 1 }, end: { type: 'integer', minimum: 1 } } } }, notes: { type: 'string' } } } } }
104+
};
105+
// Persist the request for debugging; `input` is re-expanded for readability.
// No Authorization header is included here, hence "redacted".
writeFileSync(join(runDir, 'openai-request.redacted.json'), JSON.stringify({ ...body, input: JSON.parse(body.input) }, null, 2));
106+
// Client-side 5-minute cap on the whole HTTP round trip.
const controller = new AbortController();
107+
const timer = setTimeout(() => controller.abort(), 300000);
108+
try {
109+
const res = await fetch('https://api.openai.com/v1/responses', { method: 'POST', headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }, body: JSON.stringify(body), signal: controller.signal });
110+
const text = await res.text();
111+
clearTimeout(timer);
112+
// Raw response is saved before any parsing so failures remain inspectable.
writeFileSync(join(runDir, 'openai-response.json'), text);
113+
if (!res.ok) return { ok: false, status: 'model_error', httpStatus: res.status, error: text.slice(0, 2000), durationMs: Date.now() - started };
114+
const json = JSON.parse(text);
115+
// Prefer the convenience `output_text` field; otherwise join the
// output_text items out of the structured `output` array.
const output = json.output_text || (json.output || []).flatMap((item) => item.content || []).filter((item) => item.type === 'output_text').map((item) => item.text).join('\n');
116+
// May throw on empty/malformed output; the catch below reports it.
const parsed = JSON.parse(output);
117+
// Drop any file the model invented (not in the candidate set); cap sizes.
const files = [...new Set((parsed.files || []).map(norm).filter((file) => candidateFiles.has(file)))].slice(0, 20);
118+
const spans = (parsed.spans || []).map((span) => ({ file: norm(span.file), start: span.start, end: span.end })).filter((span) => candidateFiles.has(span.file)).slice(0, 40);
119+
if (files.length === 0 && spans.length === 0) return { ok: false, status: 'model_empty_after_lane_filter', parsed, durationMs: Date.now() - started };
120+
return { ok: true, status: 'completed', parsed: { files, spans, notes: parsed.notes || '' }, usage: json.usage || null, durationMs: Date.now() - started };
121+
} catch (error) {
122+
// Covers abort (timeout), network failures, and JSON parse errors alike.
clearTimeout(timer);
123+
return { ok: false, status: 'model_timeout_or_error', error: String(error?.message || error), durationMs: Date.now() - started };
124+
}
125+
}
126+
127+
// --- Tool lane: version check, repo indexing, graph + code search. ---
const query = queryOf(task.problem_statement);
128+
const env = { ...process.env, CBM_CACHE_DIR: join(runDir, 'cbm-cache'), CBM_DIAGNOSTICS: '1' };
129+
const setup = run(process.env.CBM_BIN, ['--version'], { env, timeoutMs: 60000 });
130+
// Indexing gets a 45-minute budget; it dominates the lane's setup cost.
const index = run(process.env.CBM_BIN, ['cli', 'index_repository', JSON.stringify({ repo_path: task.repo_checkout_path })], { cwd: task.repo_checkout_path, env, timeoutMs: 2700000 });
131+
// Project name comes from the indexer's JSON output when available,
// otherwise falls back to the checkout directory name.
const project = (jsonish(index.stdout) || jsonish(index.stderr) || {}).project || basename(task.repo_checkout_path);
132+
const graph = run(process.env.CBM_BIN, ['cli', 'search_graph', JSON.stringify({ project, query, limit: 25 })], { cwd: task.repo_checkout_path, env, timeoutMs: 120000 });
133+
const code = run(process.env.CBM_BIN, ['cli', 'search_code', JSON.stringify({ project, pattern: query.split(/\s+/)[0] || '.', mode: 'compact', limit: 25 })], { cwd: task.repo_checkout_path, env, timeoutMs: 120000 });
134+
for (const [name, value] of Object.entries({ setup, index, graph, code })) writeFileSync(join(runDir, `${name}.json`), JSON.stringify(value, null, 2));
135+
136+
// --- Harvest candidate locations from both search commands' output. ---
const locs = [];
137+
for (const result of [graph, code]) { collect(result.stdout, locs); collect(result.stderr, locs); }
138+
const candidates = uniq(locs);
139+
writeFileSync(join(runDir, 'candidate-locations.json'), JSON.stringify(candidates, null, 2));
140+
141+
// --- Model selection (top-level await; valid in an .mjs module). ---
const model = await askModel(candidates, query);
142+
writeFileSync(join(runDir, 'model-result.json'), JSON.stringify(model, null, 2));
143+
const spanMap = new Map();
144+
// File-only picks become 1:1 placeholder spans so they still count as files.
if (model.ok) { for (const span of model.parsed.spans || []) addSpan(spanMap, span.file, span.start, span.end); for (const file of model.parsed.files || []) addSpan(spanMap, file, 1, 1); }
145+
const predFiles = [...spanMap.keys()].slice(0, 20);
146+
const predSpans = Object.fromEntries([...spanMap.entries()].slice(0, 20));
147+
// --- Write the prediction in the evaluator's expected shape. ---
const predictionPath = join(runDir, 'prediction.json');
148+
writeFileSync(predictionPath, JSON.stringify({ instance_id: task.instance_id, repo_url: task.repo_checkout_path, commit: task.base_commit, traj_data: { pred_steps: [{ files: predFiles, spans: predSpans }], pred_files: predFiles, pred_spans: predSpans }, model_patch: '' }, null, 2));
149+
150+
// --- Gold data + official ContextBench evaluator. ---
const goldPath = join(runDir, 'gold.json');
151+
const gold = run('node', ['scripts/contextbench-select-slice.mjs', '--write-gold', '--task-id', task.instance_id, '--out', goldPath, '--payloads', process.env.TASK_PAYLOADS], { timeoutMs: 600000 });
152+
writeFileSync(join(runDir, 'gold-command.json'), JSON.stringify(gold, null, 2));
153+
const scorePath = join(runDir, 'official-score.jsonl');
154+
// Evaluator is skipped entirely when the model produced no usable prediction.
const evaluator = model.ok && predFiles.length ? run('python', ['-m', 'contextbench.evaluate', '--gold', goldPath, '--pred', predictionPath, '--cache', join(runDir, 'repo-cache'), '--out', scorePath], { cwd: process.env.OFFICIAL_CONTEXTBENCH, timeoutMs: 1200000 }) : { status: null, error: 'skipped_no_model_prediction', durationMs: 0 };
155+
writeFileSync(join(runDir, 'evaluator-command.json'), JSON.stringify(evaluator, null, 2));
156+
let score = null;
157+
// The evaluator writes JSONL; the last non-empty line is taken as the score.
if (existsSync(scorePath)) { const lines = readFileSync(scorePath, 'utf8').trim().split(/\n+/).filter(Boolean); if (lines.length) score = JSON.parse(lines.at(-1)); }
158+
159+
// --- Assemble the row + summary artifacts and fail the job if unscoreable. ---
const row = { lane_id: 'codebase-memory-mcp', task_id: task.instance_id, model: `${process.env.OPENAI_MODEL}-${process.env.OPENAI_REASONING_EFFORT}`, status: evaluator.status === 0 && score ? 'completed' : (model.status || 'judge_failed'), setupStatus: setup.status === 0 ? 'completed' : 'setup_failed', indexStatus: index.status === 0 ? 'completed' : 'index_failed', toolCallable: graph.status === 0 || code.status === 0, candidateCount: candidates.length, nonEmptyPrediction: predFiles.length > 0, predFiles: predFiles.length, officialEvaluatorScoreable: evaluator.status === 0 && Boolean(score), setupIndex: { setupDurationMs: setup.durationMs, indexDurationMs: index.durationMs, queryDurationMs: graph.durationMs + code.durationMs }, modelStatus: model.status, modelUsage: model.usage || null, score };
160+
// NOTE(review): the resultsTable assumes score.final.{file,symbol,span,line}
// exists whenever the evaluator exits 0 — TODO confirm against the evaluator's
// output schema.
const summary = { createdAt: new Date().toISOString(), attemptedRows: 1, scoreableRows: row.officialEvaluatorScoreable ? 1 : 0, setupIndexCostReportedSeparately: true, resultsTable: row.officialEvaluatorScoreable ? [{ lane: row.lane_id, fileCoverage: score.final.file.coverage, filePrecision: score.final.file.precision, symbolCoverage: score.final.symbol.coverage, spanCoverage: score.final.span.coverage, lineCoverage: score.final.line.coverage, editlocRecall: score.editloc?.recall ?? null }] : [], rows: [row] };
161+
writeFileSync(join(runDir, 'row.json'), JSON.stringify(row, null, 2));
162+
writeFileSync(join(root, 'summary.json'), JSON.stringify(summary, null, 2));
163+
console.log(JSON.stringify(summary, null, 2));
164+
// Non-zero exit makes the workflow step fail when the row cannot be scored.
if (!row.officialEvaluatorScoreable) process.exitCode = 1;
165+
NODE
166+
node "$ROOT/run.mjs"
167+
168+
- name: Upload authenticated one-row artifacts
169+
if: always()
170+
uses: actions/upload-artifact@v4
171+
with:
172+
name: contextbench-real-gpt54mini-auth-one
173+
path: /tmp/contextbench-real-gpt54mini-auth-one
174+
retention-days: 14

0 commit comments

Comments
 (0)