Skip to content

Commit 85f7d1f

Browse files
committed
fix(gui): respect model reasoning disablement
1 parent a1ead04 commit 85f7d1f

2 files changed

Lines changed: 92 additions & 0 deletions

File tree

gui/src/redux/thunks/streamNormalInput.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,10 @@ function buildReasoningCompletionOptions(
5050
hasReasoningEnabled: boolean | undefined,
5151
model: ModelDescription,
5252
): LLMFullCompletionOptions {
53+
if (model.completionOptions?.reasoning === false) {
54+
return baseOptions;
55+
}
56+
5357
if (hasReasoningEnabled === undefined) {
5458
return baseOptions;
5559
}

gui/src/redux/thunks/streamResponse.test.ts

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -476,6 +476,94 @@ describe("streamResponseThunk", () => {
476476
});
477477
});
478478

479+
it("should preserve model-level reasoning disablement in chat requests", async () => {
480+
const initialState = getRootStateWithClaude();
481+
initialState.config.config.selectedModelByRole.chat = {
482+
title: "Qwen 3 30B",
483+
model: "qwen3:30b",
484+
provider: "ollama",
485+
underlyingProviderName: "ollama",
486+
completionOptions: { reasoning: false },
487+
};
488+
initialState.session.hasReasoningEnabled = true;
489+
initialState.session.history = [
490+
{
491+
message: { id: "1", role: "user", content: "Hello" },
492+
contextItems: [],
493+
},
494+
];
495+
496+
const mockStore = createMockStore(initialState);
497+
const mockIdeMessenger = mockStore.mockIdeMessenger;
498+
const requestSpy = vi.spyOn(mockIdeMessenger, "request");
499+
500+
mockIdeMessenger.responses["llm/compileChat"] = {
501+
compiledChatMessages: [{ role: "user", content: "Hello" }],
502+
didPrune: false,
503+
contextPercentage: 0.8,
504+
};
505+
506+
async function* mockStreamGenerator(): AsyncGenerator<
507+
AssistantChatMessage[],
508+
PromptLog
509+
> {
510+
yield [{ role: "assistant", content: "First chunk" }];
511+
return {
512+
prompt: "Hello",
513+
completion: "Hi there!",
514+
modelProvider: "ollama",
515+
modelTitle: "Qwen 3 30B",
516+
};
517+
}
518+
519+
mockIdeMessenger.llmStreamChat = vi
520+
.fn()
521+
.mockReturnValue(mockStreamGenerator());
522+
523+
await mockStore.dispatch(
524+
streamResponseThunk({
525+
editorState: mockEditorState,
526+
modifiers: mockModifiers,
527+
}) as any,
528+
);
529+
530+
expect(requestSpy).toHaveBeenCalledWith("llm/compileChat", {
531+
messages: [
532+
{
533+
role: "system",
534+
content: "You are a helpful assistant.",
535+
},
536+
{
537+
role: "user",
538+
content: [
539+
{
540+
type: "text",
541+
text: "Hello",
542+
},
543+
],
544+
},
545+
{
546+
role: "user",
547+
content: [
548+
{
549+
type: "text",
550+
text: "Hello, please help me with this code",
551+
},
552+
],
553+
},
554+
],
555+
options: {},
556+
});
557+
558+
expect(mockIdeMessenger.llmStreamChat).toHaveBeenCalledWith(
559+
expect.objectContaining({
560+
completionOptions: {},
561+
title: "Qwen 3 30B",
562+
}),
563+
expect.any(AbortSignal),
564+
);
565+
});
566+
479567
it("should execute streaming flow with tool call execution", async () => {
480568
// Set up auto-approved tool setting for our test tool
481569
const stateWithToolSettings = getRootStateWithClaude();

0 commit comments

Comments (0)