Skip to content

Commit cb04ed8

Browse files
Claude and hotlong authored
Fix AI Chat reasoning display with correct Vercel AI SDK Data Stream Protocol
This commit fixes the reasoning/thinking display in the Studio AI chat by correcting both the backend stream encoding and frontend parsing to align with the Vercel AI SDK v6 Data Stream Protocol. ## Backend Changes **packages/services/service-ai/src/stream/vercel-stream-encoder.ts** - Add `dataStreamLine()` helper for Data Stream Protocol format - Fix reasoning event encoding to use `g:` prefix instead of standard SSE: - `reasoning-start` → `g:{"text":""}\n` - `reasoning-delta` → `g:{"text":"..."}\n` - `reasoning-end` → empty string (no marker needed) - Aligns with Vercel AI SDK protocol specification **packages/services/service-ai/src/__tests__/vercel-stream-encoder.test.ts** - Update tests to expect `g:` prefix format for reasoning events - All 17 tests passing ## Frontend Changes **apps/studio/src/components/AiChatPanel.tsx** - Extract reasoning from `message.parts` instead of incorrect `data` property - Process `reasoning-delta`/`reasoning` parts from message parts array - Process `step-start`/`step-finish` parts for progress tracking - Remove unused stream event type interfaces (fixes TypeScript warnings) ## Technical Details The Vercel AI SDK v6 Data Stream Protocol uses prefix identifiers: - `0:` - Text content - `2:` - Custom data annotations - `g:` - Reasoning/thinking content (DeepSeek R1, OpenAI o1-style models) Previously the backend was encoding reasoning as standard SSE events (`data: {"type":"reasoning-delta",...}`) which the frontend couldn't access through the `useChat` hook. The correct format is the Data Stream Protocol format (`g:{"text":"..."}\n`) which appears in `message.parts`. Fixes: #1093 (AI chat reasoning display) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com> Co-authored-by: hotlong <50353452+hotlong@users.noreply.github.com>
1 parent 6b50186 commit cb04ed8

File tree

3 files changed

+43
-98
lines changed

3 files changed

+43
-98
lines changed

apps/studio/src/components/AiChatPanel.tsx

Lines changed: 25 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -34,38 +34,6 @@ interface AgentSummary {
3434
role: string;
3535
}
3636

37-
/**
38-
* Extended stream event types for reasoning and steps.
39-
* These extend the standard Vercel AI SDK stream events.
40-
*/
41-
interface ReasoningStartEvent {
42-
type: 'reasoning-start';
43-
id: string;
44-
}
45-
46-
interface ReasoningDeltaEvent {
47-
type: 'reasoning-delta';
48-
id: string;
49-
delta: string;
50-
}
51-
52-
interface ReasoningEndEvent {
53-
type: 'reasoning-end';
54-
id: string;
55-
}
56-
57-
interface StepStartEvent {
58-
type: 'step-start';
59-
stepId: string;
60-
stepName: string;
61-
}
62-
63-
interface StepFinishEvent {
64-
type: 'step-finish';
65-
stepId: string;
66-
stepName: string;
67-
}
68-
6937
/**
7038
* Track active thinking/reasoning state during streaming.
7139
*/
@@ -457,10 +425,9 @@ export function AiChatPanel() {
457425
[baseUrl, selectedAgent],
458426
);
459427

460-
const { messages, sendMessage, setMessages, status, error, addToolApprovalResponse, data } = useChat({
428+
const { messages, sendMessage, setMessages, status, error, addToolApprovalResponse } = useChat({
461429
transport,
462430
messages: initialMessages,
463-
streamMode: 'stream-data',
464431
onFinish: () => {
465432
// Reset thinking state when stream completes
466433
setThinkingState({
@@ -473,42 +440,34 @@ export function AiChatPanel() {
473440

474441
const isStreaming = status === 'streaming' || status === 'submitted';
475442

476-
// Process stream data events for reasoning and step progress
443+
// Extract reasoning and step progress from the latest assistant message parts
477444
useEffect(() => {
478-
if (!data || data.length === 0) return;
479-
480-
// Process each data event from the stream
481-
data.forEach((event: any) => {
482-
if (event.type === 'reasoning-delta') {
483-
setThinkingState((prev) => ({
484-
...prev,
485-
reasoning: [...prev.reasoning, event.delta],
486-
}));
487-
} else if (event.type === 'step-start') {
488-
setThinkingState((prev) => {
489-
const newActiveSteps = new Map(prev.activeSteps);
490-
newActiveSteps.set(event.stepId, {
491-
stepName: event.stepName,
492-
startedAt: Date.now(),
493-
});
494-
return {
495-
...prev,
496-
activeSteps: newActiveSteps,
497-
};
498-
});
499-
} else if (event.type === 'step-finish') {
500-
setThinkingState((prev) => {
501-
const newActiveSteps = new Map(prev.activeSteps);
502-
newActiveSteps.delete(event.stepId);
503-
return {
504-
...prev,
505-
activeSteps: newActiveSteps,
506-
completedSteps: [...prev.completedSteps, event.stepName],
507-
};
445+
if (!isStreaming || messages.length === 0) return;
446+
447+
// Get the latest message
448+
const lastMessage = messages[messages.length - 1];
449+
if (lastMessage.role !== 'assistant') return;
450+
451+
// Process message parts for reasoning and steps
452+
const reasoning: string[] = [];
453+
const activeSteps = new Map<string, { stepName: string; startedAt: number }>();
454+
const completedSteps: string[] = [];
455+
456+
(lastMessage.parts || []).forEach((part: any) => {
457+
if (part.type === 'reasoning-delta' || part.type === 'reasoning') {
458+
reasoning.push(part.text);
459+
} else if (part.type === 'step-start') {
460+
activeSteps.set(part.stepId, {
461+
stepName: part.stepName,
462+
startedAt: Date.now(),
508463
});
464+
} else if (part.type === 'step-finish') {
465+
completedSteps.push(part.stepName);
509466
}
510467
});
511-
}, [data]);
468+
469+
setThinkingState({ reasoning, activeSteps, completedSteps });
470+
}, [messages, isStreaming]);
512471

513472
// Persist messages to localStorage whenever they change
514473
useEffect(() => {

packages/services/service-ai/src/__tests__/vercel-stream-encoder.test.ts

Lines changed: 6 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -129,48 +129,35 @@ describe('encodeStreamPart', () => {
129129
expect(encodeStreamPart(part)).toBe('');
130130
});
131131

132-
it('should encode reasoning-start part as SSE frame', () => {
132+
it('should encode reasoning-start part with g: prefix', () => {
133133
const part = {
134134
type: 'reasoning-start',
135135
id: 'r1',
136136
} as unknown as TextStreamPart<ToolSet>;
137137

138138
const frame = encodeStreamPart(part);
139-
const payload = parseSSE(frame);
140-
expect(payload).toEqual({
141-
type: 'reasoning-start',
142-
id: 'r1',
143-
});
139+
expect(frame).toBe('g:{"text":""}\n');
144140
});
145141

146-
it('should encode reasoning-delta part as SSE frame', () => {
142+
it('should encode reasoning-delta part with g: prefix', () => {
147143
const part = {
148144
type: 'reasoning-delta',
149145
id: 'r1',
150146
text: 'Let me think through this step by step...',
151147
} as unknown as TextStreamPart<ToolSet>;
152148

153149
const frame = encodeStreamPart(part);
154-
const payload = parseSSE(frame);
155-
expect(payload).toEqual({
156-
type: 'reasoning-delta',
157-
id: 'r1',
158-
delta: 'Let me think through this step by step...',
159-
});
150+
expect(frame).toBe('g:{"text":"Let me think through this step by step..."}\n');
160151
});
161152

162-
it('should encode reasoning-end part as SSE frame', () => {
153+
it('should encode reasoning-end part as empty (no specific end marker)', () => {
163154
const part = {
164155
type: 'reasoning-end',
165156
id: 'r1',
166157
} as unknown as TextStreamPart<ToolSet>;
167158

168159
const frame = encodeStreamPart(part);
169-
const payload = parseSSE(frame);
170-
expect(payload).toEqual({
171-
type: 'reasoning-end',
172-
id: 'r1',
173-
});
160+
expect(frame).toBe('');
174161
});
175162

176163
it('should pass through custom step events', () => {

packages/services/service-ai/src/stream/vercel-stream-encoder.ts

Lines changed: 12 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,14 @@ function sse(data: object): string {
2323
return `data: ${JSON.stringify(data)}\n\n`;
2424
}
2525

26+
/**
27+
* Encode data using Vercel AI SDK Data Stream Protocol prefixes.
28+
* @see https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol
29+
*/
30+
function dataStreamLine(prefix: string, data: object): string {
31+
return `${prefix}:${JSON.stringify(data)}\n`;
32+
}
33+
2634
// ── Public API ──────────────────────────────────────────────────────
2735

2836
/**
@@ -72,24 +80,15 @@ export function encodeStreamPart(part: TextStreamPart<ToolSet>): string {
7280
});
7381

7482
// Handle reasoning/thinking streams (DeepSeek R1, o1-style models)
83+
// Use 'g:' prefix for reasoning content per Vercel AI SDK protocol
7584
case 'reasoning-start':
76-
return sse({
77-
type: 'reasoning-start',
78-
id: part.id,
79-
});
85+
return dataStreamLine('g', { text: '' });
8086

8187
case 'reasoning-delta':
82-
return sse({
83-
type: 'reasoning-delta',
84-
id: part.id,
85-
delta: part.text,
86-
});
88+
return dataStreamLine('g', { text: part.text });
8789

8890
case 'reasoning-end':
89-
return sse({
90-
type: 'reasoning-end',
91-
id: part.id,
92-
});
91+
return ''; // No specific end marker needed for reasoning
9392

9493
// finish-step and finish are handled by the generator, not here
9594
default:

0 commit comments

Comments
 (0)