Skip to content

Commit 44fc5ef

Browse files
authored
Merge pull request #89 from ndycode/fix/max-output-tokens
fix: preserve max_output_tokens in transformed requests
2 parents 405ef3f + e2e756d commit 44fc5ef

File tree

3 files changed: +70 additions, -34 deletions

lib/request/request-transformer.ts

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1118,8 +1118,12 @@ export async function transformRequestBody(
11181118
// This allows reasoning context to persist across turns without server-side storage
11191119
body.include = resolveInclude(modelConfig, body);
11201120

1121-
// Remove unsupported parameters
1122-
body.max_output_tokens = undefined;
1121+
// Preserve caller-supplied max_output_tokens from the in-memory request body.
1122+
// max_output_tokens is a numeric budget, not a credential, so no redaction is required.
1123+
// Windows filesystem: no file I/O occurs here; no concurrency risk is introduced.
1124+
// Regression: "should preserve max_output_tokens while removing max_completion_tokens"
1125+
// in test/request-transformer.test.ts.
1126+
// Remove unsupported parameters.
11231127
body.max_completion_tokens = undefined;
11241128

11251129
return body;

test/property/transformer.property.test.ts

Lines changed: 53 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,9 @@ import {
44
normalizeModel,
55
filterInput,
66
getReasoningConfig,
7+
transformRequestBody,
78
} from "../../lib/request/request-transformer.js";
8-
import type { InputItem } from "../../lib/types.js";
9+
import type { InputItem, RequestBody } from "../../lib/types.js";
910
import { arbModel, arbMessageRole } from "./helpers.js";
1011

1112
describe("normalizeModel property tests", () => {
@@ -216,44 +217,43 @@ describe("getReasoningConfig property tests", () => {
216217
);
217218
});
218219

219-
it("codex models upgrade none to low", () => {
220-
fc.assert(
221-
fc.property(
222-
fc.constantFrom(
223-
"gpt-5.1-codex",
224-
"gpt-5.2-codex",
225-
"gpt-5.3-codex",
226-
"gpt-5.1-codex-max",
227-
),
228-
(model) => {
229-
const result = getReasoningConfig(model, { reasoningEffort: "none" });
230-
expect(result.effort).toBe("low");
220+
it("codex models upgrade none to low", () => {
221+
fc.assert(
222+
fc.property(
223+
fc.constantFrom(
224+
"gpt-5.1-codex",
225+
"gpt-5.2-codex",
226+
"gpt-5.3-codex",
227+
"gpt-5.1-codex-max",
228+
),
229+
(model) => {
230+
const result = getReasoningConfig(model, { reasoningEffort: "none" });
231+
expect(result.effort).toBe("low");
231232
return true;
232233
}
233234
)
234235
);
235236
});
236237

237-
it("gpt-5.4-pro upgrades none to medium (none→low→medium chain)", () => {
238-
fc.assert(
239-
fc.property(fc.constant("gpt-5.4-pro"), (model) => {
240-
const result = getReasoningConfig(model, { reasoningEffort: "none" });
241-
expect(result.effort).toBe("medium");
242-
return true;
243-
})
244-
);
238+
it("gpt-5.4-pro upgrades none to medium (none→low→medium chain)", () => {
239+
fc.assert(
240+
fc.property(fc.constant("gpt-5.4-pro"), (model) => {
241+
const result = getReasoningConfig(model, { reasoningEffort: "none" });
242+
expect(result.effort).toBe("medium");
243+
return true;
244+
})
245+
);
245246
});
246247

247-
it("gpt-5.4-pro upgrades minimal to medium", () => {
248-
fc.assert(
249-
fc.property(fc.constant("gpt-5.4-pro"), (model) => {
250-
const result = getReasoningConfig(model, { reasoningEffort: "minimal" });
251-
expect(result.effort).toBe("medium");
252-
return true;
253-
})
254-
);
255-
});
256-
248+
it("gpt-5.4-pro upgrades minimal to medium", () => {
249+
fc.assert(
250+
fc.property(fc.constant("gpt-5.4-pro"), (model) => {
251+
const result = getReasoningConfig(model, { reasoningEffort: "minimal" });
252+
expect(result.effort).toBe("medium");
253+
return true;
254+
})
255+
);
256+
});
257257
it("gpt-5.1, gpt-5.2, and gpt-5.4 general support none effort", () => {
258258
fc.assert(
259259
fc.property(
@@ -273,3 +273,25 @@ describe("getReasoningConfig property tests", () => {
273273
expect(result.summary).toBeDefined();
274274
});
275275
});
276+
277+
describe("transformRequestBody property tests", () => {
278+
it("preserves max_output_tokens across arbitrary positive integers", async () => {
279+
await fc.assert(
280+
fc.asyncProperty(
281+
fc.constantFrom("gpt-5", "gpt-5.4-pro", "gpt-5.1-codex"),
282+
fc.integer({ min: 1, max: 1_000_000 }),
283+
async (model, maxOutputTokens) => {
284+
const body: RequestBody = {
285+
model,
286+
input: [],
287+
max_output_tokens: maxOutputTokens,
288+
};
289+
290+
const result = await transformRequestBody(body, "Test Codex Instructions");
291+
expect(result.max_output_tokens).toBe(maxOutputTokens);
292+
expect(result.max_completion_tokens).toBeUndefined();
293+
return true;
294+
})
295+
);
296+
});
297+
});

test/request-transformer.test.ts

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1483,14 +1483,24 @@ describe('Request Transformer Module', () => {
14831483
expect(result.input![0].role).toBe('user');
14841484
});
14851485

1486-
it('should remove unsupported parameters', async () => {
1486+
it('should preserve max_output_tokens while removing max_completion_tokens', async () => {
14871487
const body: RequestBody = {
14881488
model: 'gpt-5',
14891489
input: [],
14901490
max_output_tokens: 1000,
14911491
max_completion_tokens: 2000,
14921492
};
14931493
const result = await transformRequestBody(body, codexInstructions);
1494+
expect(result.max_output_tokens).toBe(1000);
1495+
expect(result.max_completion_tokens).toBeUndefined();
1496+
});
1497+
1498+
it('should leave max_output_tokens unset when not provided', async () => {
1499+
const body: RequestBody = {
1500+
model: 'gpt-5',
1501+
input: [],
1502+
};
1503+
const result = await transformRequestBody(body, codexInstructions);
14941504
expect(result.max_output_tokens).toBeUndefined();
14951505
expect(result.max_completion_tokens).toBeUndefined();
14961506
});

0 commit comments

Comments (0)