Skip to content

Commit e52d34e

Browse files
committed
fix(core): handle partial llm_request in BeforeModel hook override
When a BeforeModel hook returns only a model override without messages (e.g. { model: 'gemini-2.5-flash' }), fromHookLLMRequest crashed with a TypeError because it called .map() on undefined messages. Guard hookRequest.messages before mapping, falling back to baseRequest?.contents so the original conversation is preserved. Also use nullish coalescing for the model field, so a partial hook response falls back to the base request's model instead of overwriting it with undefined. Fixes #21847
1 parent 62cb14f commit e52d34e

File tree

2 files changed

+71
-14
lines changed

2 files changed

+71
-14
lines changed

packages/core/src/hooks/hookTranslator.test.ts

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,56 @@ describe('HookTranslator', () => {
121121
},
122122
]);
123123
});
124+
125+
it('should apply model override when hook returns only model field', () => {
126+
const baseRequest: GenerateContentParameters = {
127+
model: 'gemini-2.5-flash-lite',
128+
contents: [
129+
{
130+
role: 'user',
131+
parts: [{ text: 'Hello' }],
132+
},
133+
],
134+
} as unknown as GenerateContentParameters;
135+
136+
// Simulate a hook that only overrides the model — no messages field
137+
const hookRequest = {
138+
model: 'gemini-2.5-flash',
139+
} as unknown as LLMRequest;
140+
141+
const sdkRequest = translator.fromHookLLMRequest(
142+
hookRequest,
143+
baseRequest,
144+
);
145+
146+
// Model should be overridden
147+
expect(sdkRequest.model).toBe('gemini-2.5-flash');
148+
// Original conversation contents should be preserved
149+
expect(sdkRequest.contents).toEqual(baseRequest.contents);
150+
});
151+
152+
it('should preserve base request contents when hook messages is undefined', () => {
153+
const baseRequest: GenerateContentParameters = {
154+
model: 'gemini-1.5-flash',
155+
contents: [
156+
{ role: 'user', parts: [{ text: 'original message' }] },
157+
{ role: 'model', parts: [{ text: 'original reply' }] },
158+
],
159+
} as unknown as GenerateContentParameters;
160+
161+
const hookRequest = {
162+
model: 'gemini-1.5-pro',
163+
// messages intentionally omitted
164+
} as unknown as LLMRequest;
165+
166+
const sdkRequest = translator.fromHookLLMRequest(
167+
hookRequest,
168+
baseRequest,
169+
);
170+
171+
expect(sdkRequest.model).toBe('gemini-1.5-pro');
172+
expect(sdkRequest.contents).toEqual(baseRequest.contents);
173+
});
124174
});
125175

126176
describe('LLM Response Translation', () => {

packages/core/src/hooks/hookTranslator.ts

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -225,23 +225,30 @@ export class HookTranslatorGenAIv1 extends HookTranslator {
225225
hookRequest: LLMRequest,
226226
baseRequest?: GenerateContentParameters,
227227
): GenerateContentParameters {
228-
// Convert hook messages back to SDK Content format
229-
const contents = hookRequest.messages.map((message) => ({
230-
role: message.role === 'model' ? 'model' : message.role,
231-
parts: [
232-
{
233-
text:
234-
typeof message.content === 'string'
235-
? message.content
236-
: String(message.content),
237-
},
238-
],
239-
}));
228+
// Convert hook messages back to SDK Content format.
229+
// If the hook returned a partial request without messages (e.g. only
230+
// overriding `model`), fall back to the base request's contents so the
231+
// conversation is preserved.
232+
const contents = hookRequest.messages
233+
? hookRequest.messages.map((message) => ({
234+
role: message.role === 'model' ? 'model' : message.role,
235+
parts: [
236+
{
237+
text:
238+
typeof message.content === 'string'
239+
? message.content
240+
: String(message.content),
241+
},
242+
],
243+
}))
244+
: (baseRequest?.contents ?? []);
240245

241-
// Build the result with proper typing
246+
// Build the result with proper typing.
247+
// Use nullish coalescing so a hook that only sets `model` still works --
248+
// fall back to the base request's model rather than overwriting with undefined.
242249
const result: GenerateContentParameters = {
243250
...baseRequest,
244-
model: hookRequest.model,
251+
model: hookRequest.model ?? baseRequest?.model ?? '',
245252
contents,
246253
};
247254

0 commit comments

Comments
 (0)