Commit 68c79e0

Fix failing test, add Vercel Data Stream encoder tests, update CHANGELOG

- Fix POST /api/v1/ai/chat test: the route now defaults to streaming mode; legacy JSON mode requires stream: false
- Add 18 new tests covering vercel-stream-encoder (encodeStreamPart, encodeVercelDataStream), route dual-mode, systemPrompt, flat options, and array content validation
- Update message validation to accept array content for Vercel multi-part messages
- Update CHANGELOG.md with migration notes for the Data Stream Protocol
- All 176 tests passing (up from 158)

Agent-Logs-Url: https://github.com/objectstack-ai/spec/sessions/fc1d7caa-ef41-4b0b-9ce4-267c262596b4
Co-authored-by: xuyushun441-sys <255036401+xuyushun441-sys@users.noreply.github.com>
1 parent 8a8476a commit 68c79e0
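
For callers migrating across this change, the only request-body difference is the `stream` flag. A minimal sketch (field names as exercised by the tests below):

// Before this commit, POST /api/v1/ai/chat returned JSON by default.
// Now the same body yields Vercel Data Stream frames; legacy JSON callers
// must opt out of streaming explicitly.
const legacyJsonBody = {
  messages: [{ role: 'user', content: 'Hi' }],
  stream: false, // required to keep the original JSON response
};
const streamingBody = {
  messages: [{ role: 'user', content: 'Hi' }],
  // no `stream` field: Data Stream Protocol frames are the new default
};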

3 files changed

Lines changed: 345 additions & 2 deletions


CHANGELOG.md

Lines changed: 20 additions & 0 deletions
@@ -42,6 +42,26 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added
 - `ai` v6 as a dependency of `@objectstack/spec` for type re-exports
+- **Vercel AI Data Stream Protocol support on `/api/v1/ai/chat`** — The chat
+  endpoint now supports dual-mode responses:
+  - **Streaming (default)**: When `stream` is not `false`, returns Vercel Data
+    Stream Protocol frames (`0:` text, `9:` tool-call, `d:` finish, etc.),
+    directly consumable by `@ai-sdk/react/useChat`
+  - **JSON (legacy)**: When `stream: false`, returns the original JSON response
+  - Accepts the Vercel useChat flat body format (`system`, `model`, `temperature`,
+    `maxTokens` as top-level fields) alongside the legacy `{ messages, options }`
+  - `systemPrompt` / `system` field is prepended as a system message
+  - Message validation now accepts Vercel multi-part array content
+  - `RouteResponse.vercelDataStream` flag signals the HTTP server layer to encode
+    events using the Vercel Data Stream frame format
+- **`VercelLLMAdapter`** — Production adapter wrapping the Vercel AI SDK's
+  `generateText` / `streamText` for any compatible model provider (OpenAI,
+  Anthropic, Google, Ollama, etc.)
+- **`vercel-stream-encoder.ts`** — Utilities (`encodeStreamPart`,
+  `encodeVercelDataStream`) to convert `TextStreamPart<ToolSet>` events into
+  Vercel Data Stream wire-format frames
+- 176 service-ai tests passing (18 new tests for stream encoder, route
+  dual-mode, systemPrompt, flat options, array content)
 
 ## [4.0.1] — 2026-03-31
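
For context on the frame format named above: each frame is a newline-terminated `code:json` line. Below is a minimal consumption sketch using only the Fetch and Web Streams APIs; the request shape follows the tests in this commit, while the parsing loop is illustrative rather than taken from any shipped client:

// Stream the chat response and print text deltas as they arrive (Node 18+).
const res = await fetch('/api/v1/ai/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  // No `stream: false`, so the Data Stream mode is used by default.
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Hi' }] }),
});

const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
let buffered = '';
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  buffered += value;
  let nl: number;
  while ((nl = buffered.indexOf('\n')) !== -1) {
    const frame = buffered.slice(0, nl);
    buffered = buffered.slice(nl + 1);
    const sep = frame.indexOf(':');
    const code = frame.slice(0, sep); // '0' text, '9' tool-call, 'a' tool-result, 'd' finish
    const payload = JSON.parse(frame.slice(sep + 1));
    if (code === '0') process.stdout.write(payload); // text delta payload is a JSON string
  }
}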

packages/services/service-ai/src/__tests__/ai-service.test.ts

Lines changed: 91 additions & 2 deletions
@@ -382,18 +382,106 @@ describe('AI Routes', () => {
     expect(paths).toContain('DELETE /api/v1/ai/conversations/:id');
   });
 
-  it('POST /api/v1/ai/chat should return chat result', async () => {
+  it('POST /api/v1/ai/chat should return JSON result when stream=false', async () => {
+    const routes = buildAIRoutes(service, service.conversationService, silentLogger);
+    const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
+
+    const response = await chatRoute.handler({
+      body: { messages: [{ role: 'user', content: 'Hi' }], stream: false },
+    });
+
+    expect(response.status).toBe(200);
+    expect((response.body as any).content).toBe('[memory] Hi');
+  });
+
+  it('POST /api/v1/ai/chat should default to Vercel Data Stream mode', async () => {
     const routes = buildAIRoutes(service, service.conversationService, silentLogger);
     const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
 
     const response = await chatRoute.handler({
       body: { messages: [{ role: 'user', content: 'Hi' }] },
     });
 
+    expect(response.status).toBe(200);
+    expect(response.stream).toBe(true);
+    expect(response.vercelDataStream).toBe(true);
+    expect(response.events).toBeDefined();
+
+    // Consume the Vercel Data Stream events
+    const events: unknown[] = [];
+    for await (const event of response.events!) {
+      events.push(event);
+    }
+    expect(events.length).toBeGreaterThan(0);
+  });
+
+  it('POST /api/v1/ai/chat should prepend systemPrompt as system message', async () => {
+    const routes = buildAIRoutes(service, service.conversationService, silentLogger);
+    const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
+
+    const response = await chatRoute.handler({
+      body: {
+        messages: [{ role: 'user', content: 'Hello' }],
+        system: 'You are a helpful assistant',
+        stream: false,
+      },
+    });
+
+    expect(response.status).toBe(200);
+    // MemoryLLMAdapter echoes the last user message
+    expect((response.body as any).content).toBe('[memory] Hello');
+  });
+
+  it('POST /api/v1/ai/chat should accept deprecated systemPrompt field', async () => {
+    const routes = buildAIRoutes(service, service.conversationService, silentLogger);
+    const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
+
+    const response = await chatRoute.handler({
+      body: {
+        messages: [{ role: 'user', content: 'Hi' }],
+        systemPrompt: 'Be concise',
+        stream: false,
+      },
+    });
+
     expect(response.status).toBe(200);
     expect((response.body as any).content).toBe('[memory] Hi');
   });
 
+  it('POST /api/v1/ai/chat should accept flat Vercel-style fields (model, temperature)', async () => {
+    const routes = buildAIRoutes(service, service.conversationService, silentLogger);
+    const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
+
+    const response = await chatRoute.handler({
+      body: {
+        messages: [{ role: 'user', content: 'Hi' }],
+        model: 'gpt-4o',
+        temperature: 0.5,
+        stream: false,
+      },
+    });
+
+    expect(response.status).toBe(200);
+    // MemoryLLMAdapter uses the model from options when provided
+    expect((response.body as any).model).toBe('gpt-4o');
+  });
+
+  it('POST /api/v1/ai/chat should accept array content (Vercel multi-part)', async () => {
+    const routes = buildAIRoutes(service, service.conversationService, silentLogger);
+    const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
+
+    const response = await chatRoute.handler({
+      body: {
+        messages: [{ role: 'user', content: [{ type: 'text', text: 'Hi' }] }],
+        stream: false,
+      },
+    });
+
+    // MemoryLLMAdapter falls back to "(complex content)" for non-string
+    expect(response.status).toBe(200);
+    expect((response.body as any).content).toBe('[memory] (complex content)');
+  });
+
   it('POST /api/v1/ai/chat should return 400 without messages', async () => {
     const routes = buildAIRoutes(service, service.conversationService, silentLogger);
     const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
@@ -531,7 +619,7 @@ describe('AI Routes', () => {
     expect((response.body as any).error).toContain('message.role');
   });
 
-  it('POST /api/v1/ai/chat should return 400 for messages with non-string content', async () => {
+  it('POST /api/v1/ai/chat should return 400 for messages with non-string/non-array content', async () => {
    const routes = buildAIRoutes(service, service.conversationService, silentLogger);
     const chatRoute = routes.find(r => r.path === '/api/v1/ai/chat')!;
 
@@ -620,6 +708,7 @@ describe('AI Routes', () => {
           { role: 'assistant', content: '' },
           { role: 'tool', content: '{"temp": 22}', toolCallId: 'call_1' },
         ],
+        stream: false,
       },
     });
 

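The chat route implementation itself is not included in this diff, so here is a hypothetical sketch of the dual-mode branch these tests exercise. Only the response fields (`status`, `stream`, `vercelDataStream`, `events`, `body`) and the accepted body fields are grounded in the tests; `llm`, `ChatMessage`, and `ChatBody` are illustrative stand-ins:

type ChatMessage = { role: string; content: unknown };
type ChatBody = {
  messages: ChatMessage[];
  stream?: boolean;
  system?: string;
  systemPrompt?: string; // deprecated alias, still accepted
  model?: string;
  temperature?: number;
  maxTokens?: number;
};

// Stand-in for this package's real adapter; the actual interface may differ.
declare const llm: {
  chat(msgs: ChatMessage[], opts: object): Promise<object>;
  chatStream(msgs: ChatMessage[], opts: object): AsyncIterable<unknown>;
};

async function handleChat(req: { body: ChatBody }) {
  const { body } = req;
  const opts = { model: body.model, temperature: body.temperature, maxTokens: body.maxTokens };

  // `system` (Vercel flat format) or the deprecated `systemPrompt` is
  // prepended as a system message.
  const systemText = body.system ?? body.systemPrompt;
  const messages = systemText
    ? [{ role: 'system', content: systemText }, ...body.messages]
    : body.messages;

  if (body.stream === false) {
    // Legacy JSON mode: resolve the full completion and return it directly.
    return { status: 200, body: await llm.chat(messages, opts) };
  }

  // Default: streaming. The HTTP layer sees `vercelDataStream` and encodes
  // `events` into Data Stream frames before writing them out.
  return { status: 200, stream: true, vercelDataStream: true, events: llm.chatStream(messages, opts) };
}
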
packages/services/service-ai/src/__tests__/vercel-stream-encoder.test.ts

Lines changed: 234 additions & 0 deletions
@@ -0,0 +1,234 @@
// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license.

import { describe, it, expect } from 'vitest';
import type { TextStreamPart, ToolSet } from '@objectstack/spec/contracts';
import { encodeStreamPart, encodeVercelDataStream } from '../stream/vercel-stream-encoder.js';

// ─────────────────────────────────────────────────────────────────
// encodeStreamPart — individual frame encoding
// ─────────────────────────────────────────────────────────────────

describe('encodeStreamPart', () => {
  it('should encode text-delta as "0:" frame', () => {
    const part = { type: 'text-delta', text: 'Hello world' } as TextStreamPart<ToolSet>;
    expect(encodeStreamPart(part)).toBe('0:"Hello world"\n');
  });

  it('should JSON-escape text-delta content', () => {
    const part = { type: 'text-delta', text: 'say "hi"\nnewline' } as TextStreamPart<ToolSet>;
    const frame = encodeStreamPart(part);
    expect(frame).toBe(`0:${JSON.stringify('say "hi"\nnewline')}\n`);
    expect(frame.startsWith('0:')).toBe(true);
  });

  it('should encode tool-call as "9:" frame', () => {
    const part = {
      type: 'tool-call',
      toolCallId: 'call_1',
      toolName: 'get_weather',
      input: { location: 'San Francisco' },
    } as TextStreamPart<ToolSet>;

    const frame = encodeStreamPart(part);
    expect(frame.startsWith('9:')).toBe(true);

    const payload = JSON.parse(frame.slice(2));
    expect(payload).toEqual({
      toolCallId: 'call_1',
      toolName: 'get_weather',
      args: { location: 'San Francisco' },
    });
  });

  it('should encode tool-call-streaming-start as "b:" frame', () => {
    const part = {
      type: 'tool-call-streaming-start',
      toolCallId: 'call_2',
      toolName: 'search',
    } as TextStreamPart<ToolSet>;

    const frame = encodeStreamPart(part);
    expect(frame.startsWith('b:')).toBe(true);

    const payload = JSON.parse(frame.slice(2));
    expect(payload).toEqual({
      toolCallId: 'call_2',
      toolName: 'search',
    });
  });

  it('should encode tool-call-delta as "c:" frame', () => {
    const part = {
      type: 'tool-call-delta',
      toolCallId: 'call_2',
      argsTextDelta: '{"query":',
    } as TextStreamPart<ToolSet>;

    const frame = encodeStreamPart(part);
    expect(frame.startsWith('c:')).toBe(true);

    const payload = JSON.parse(frame.slice(2));
    expect(payload).toEqual({
      toolCallId: 'call_2',
      argsTextDelta: '{"query":',
    });
  });

  it('should encode tool-result as "a:" frame', () => {
    const part = {
      type: 'tool-result',
      toolCallId: 'call_1',
      toolName: 'get_weather',
      result: { temperature: 72 },
    } as TextStreamPart<ToolSet>;

    const frame = encodeStreamPart(part);
    expect(frame.startsWith('a:')).toBe(true);

    const payload = JSON.parse(frame.slice(2));
    expect(payload).toEqual({
      toolCallId: 'call_1',
      result: { temperature: 72 },
    });
  });

  it('should encode finish as "d:" frame', () => {
    const part = {
      type: 'finish',
      finishReason: 'stop',
      totalUsage: { promptTokens: 10, completionTokens: 20, totalTokens: 30 },
      rawFinishReason: 'stop',
    } as unknown as TextStreamPart<ToolSet>;

    const frame = encodeStreamPart(part);
    expect(frame.startsWith('d:')).toBe(true);

    const payload = JSON.parse(frame.slice(2));
    expect(payload.finishReason).toBe('stop');
    expect(payload.usage).toEqual({ promptTokens: 10, completionTokens: 20, totalTokens: 30 });
  });

  it('should encode step-finish as "e:" frame', () => {
    const part = {
      type: 'step-finish',
      finishReason: 'tool-calls',
      totalUsage: { promptTokens: 5, completionTokens: 10, totalTokens: 15 },
      isContinued: true,
    } as unknown as TextStreamPart<ToolSet>;

    const frame = encodeStreamPart(part);
    expect(frame.startsWith('e:')).toBe(true);

    const payload = JSON.parse(frame.slice(2));
    expect(payload.finishReason).toBe('tool-calls');
    expect(payload.isContinued).toBe(true);
  });

  it('should return empty string for unknown event types', () => {
    const part = { type: 'unknown-internal' } as unknown as TextStreamPart<ToolSet>;
    expect(encodeStreamPart(part)).toBe('');
  });
});

// ─────────────────────────────────────────────────────────────────
// encodeVercelDataStream — async iterable transformation
// ─────────────────────────────────────────────────────────────────

describe('encodeVercelDataStream', () => {
  it('should transform stream events into Vercel Data Stream frames', async () => {
    async function* source(): AsyncIterable<TextStreamPart<ToolSet>> {
      yield { type: 'text-delta', text: 'Hello' } as TextStreamPart<ToolSet>;
      yield { type: 'text-delta', text: ' world' } as TextStreamPart<ToolSet>;
      yield {
        type: 'finish',
        finishReason: 'stop',
        totalUsage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
        rawFinishReason: 'stop',
      } as unknown as TextStreamPart<ToolSet>;
    }

    const frames: string[] = [];
    for await (const frame of encodeVercelDataStream(source())) {
      frames.push(frame);
    }

    expect(frames).toHaveLength(3);
    expect(frames[0]).toBe('0:"Hello"\n');
    expect(frames[1]).toBe('0:" world"\n');
    expect(frames[2]).toMatch(/^d:/);
  });

  it('should skip events with no wire format mapping', async () => {
    async function* source(): AsyncIterable<TextStreamPart<ToolSet>> {
      yield { type: 'text-delta', text: 'Hi' } as TextStreamPart<ToolSet>;
      yield { type: 'unknown-internal' } as unknown as TextStreamPart<ToolSet>;
      yield {
        type: 'finish',
        finishReason: 'stop',
        totalUsage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
        rawFinishReason: 'stop',
      } as unknown as TextStreamPart<ToolSet>;
    }

    const frames: string[] = [];
    for await (const frame of encodeVercelDataStream(source())) {
      frames.push(frame);
    }

    // 'unknown-internal' is silently dropped
    expect(frames).toHaveLength(2);
    expect(frames[0]).toBe('0:"Hi"\n');
    expect(frames[1]).toMatch(/^d:/);
  });

  it('should handle empty stream', async () => {
    async function* source(): AsyncIterable<TextStreamPart<ToolSet>> {
      // empty
    }

    const frames: string[] = [];
    for await (const frame of encodeVercelDataStream(source())) {
      frames.push(frame);
    }

    expect(frames).toHaveLength(0);
  });

  it('should handle tool-call events in stream', async () => {
    async function* source(): AsyncIterable<TextStreamPart<ToolSet>> {
      yield {
        type: 'tool-call',
        toolCallId: 'call_1',
        toolName: 'search',
        input: { query: 'test' },
      } as TextStreamPart<ToolSet>;
      yield {
        type: 'tool-result',
        toolCallId: 'call_1',
        toolName: 'search',
        result: { hits: 42 },
      } as TextStreamPart<ToolSet>;
      yield {
        type: 'finish',
        finishReason: 'tool-calls',
        totalUsage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
        rawFinishReason: 'tool_calls',
      } as unknown as TextStreamPart<ToolSet>;
    }

    const frames: string[] = [];
    for await (const frame of encodeVercelDataStream(source())) {
      frames.push(frame);
    }

    expect(frames).toHaveLength(3);
    expect(frames[0]).toMatch(/^9:/);
    expect(frames[1]).toMatch(/^a:/);
    expect(frames[2]).toMatch(/^d:/);

    // Verify tool-call frame content
    const toolCallPayload = JSON.parse(frames[0].slice(2));
    expect(toolCallPayload.toolCallId).toBe('call_1');
    expect(toolCallPayload.args).toEqual({ query: 'test' });
  });
});

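The encoder implementation itself is not part of this diff. For orientation, here is a sketch that would satisfy the assertions above; the frame codes and payload shapes are taken directly from the tests, while everything else (the cast, the control flow) is assumed rather than copied from vercel-stream-encoder.ts:

import type { TextStreamPart, ToolSet } from '@objectstack/spec/contracts';

// Sketch of an encoder consistent with the tests above; the shipped
// implementation may differ in structure.
export function encodeStreamPart(part: TextStreamPart<ToolSet>): string {
  const p = part as any; // tolerate fields beyond the public union type
  switch (p.type) {
    case 'text-delta':
      return `0:${JSON.stringify(p.text)}\n`;
    case 'tool-call':
      return `9:${JSON.stringify({ toolCallId: p.toolCallId, toolName: p.toolName, args: p.input })}\n`;
    case 'tool-call-streaming-start':
      return `b:${JSON.stringify({ toolCallId: p.toolCallId, toolName: p.toolName })}\n`;
    case 'tool-call-delta':
      return `c:${JSON.stringify({ toolCallId: p.toolCallId, argsTextDelta: p.argsTextDelta })}\n`;
    case 'tool-result':
      return `a:${JSON.stringify({ toolCallId: p.toolCallId, result: p.result })}\n`;
    case 'finish':
      return `d:${JSON.stringify({ finishReason: p.finishReason, usage: p.totalUsage })}\n`;
    case 'step-finish':
      return `e:${JSON.stringify({ finishReason: p.finishReason, usage: p.totalUsage, isContinued: p.isContinued })}\n`;
    default:
      return ''; // events with no wire-format mapping produce no frame
  }
}

export async function* encodeVercelDataStream(
  source: AsyncIterable<TextStreamPart<ToolSet>>,
): AsyncIterable<string> {
  for await (const part of source) {
    const frame = encodeStreamPart(part);
    if (frame) yield frame; // skip unmapped events entirely
  }
}
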