
Commit 8a8476a

Copilot and hotlong authored
Changes before error encountered
Agent-Logs-Url: https://github.com/objectstack-ai/spec/sessions/74cfdaa5-9d61-426c-ba7c-ecaba5f480f2
Co-authored-by: hotlong <50353452+hotlong@users.noreply.github.com>
1 parent af23ad1 · commit 8a8476a

6 files changed: 297 additions & 8 deletions

File tree

packages/services/service-ai/src/adapters/index.ts
packages/services/service-ai/src/adapters/vercel-adapter.ts
packages/services/service-ai/src/index.ts
packages/services/service-ai/src/routes/ai-routes.ts
packages/services/service-ai/src/stream/index.ts
packages/services/service-ai/src/stream/vercel-stream-encoder.ts

packages/services/service-ai/src/adapters/index.ts

Lines changed: 2 additions & 0 deletions

```diff
@@ -2,3 +2,5 @@
 
 export type { LLMAdapter } from '@objectstack/spec/contracts';
 export { MemoryLLMAdapter } from './memory-adapter.js';
+export { VercelLLMAdapter } from './vercel-adapter.js';
+export type { VercelLLMAdapterConfig } from './vercel-adapter.js';
```

packages/services/service-ai/src/adapters/vercel-adapter.ts (new file)
Lines changed: 117 additions & 0 deletions

````typescript
// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license.

import type {
  ModelMessage,
  AIRequestOptions,
  AIResult,
  TextStreamPart,
  ToolSet,
} from '@objectstack/spec/contracts';
import type { LLMAdapter } from '@objectstack/spec/contracts';
import type { LanguageModelV3 } from '@ai-sdk/provider';
import { generateText, streamText } from 'ai';

/**
 * VercelLLMAdapter — Production LLM adapter powered by the Vercel AI SDK.
 *
 * Wraps `generateText` / `streamText` from the `ai` package, delegating to
 * any Vercel AI SDK–compatible model provider (OpenAI, Anthropic, Google,
 * Ollama, etc.).
 *
 * @example
 * ```typescript
 * import { openai } from '@ai-sdk/openai';
 * import { VercelLLMAdapter } from '@objectstack/service-ai';
 *
 * const adapter = new VercelLLMAdapter({ model: openai('gpt-4o') });
 * ```
 */
export class VercelLLMAdapter implements LLMAdapter {
  readonly name = 'vercel';

  private readonly model: LanguageModelV3;

  constructor(config: VercelLLMAdapterConfig) {
    this.model = config.model;
  }

  async chat(messages: ModelMessage[], options?: AIRequestOptions): Promise<AIResult> {
    const result = await generateText({
      model: this.model,
      messages,
      temperature: options?.temperature,
      maxTokens: options?.maxTokens,
    });

    return {
      content: result.text,
      model: result.response?.modelId,
      toolCalls: result.toolCalls?.length ? result.toolCalls : undefined,
      usage: result.usage ? {
        promptTokens: result.usage.promptTokens,
        completionTokens: result.usage.completionTokens,
        totalTokens: result.usage.totalTokens,
      } : undefined,
    };
  }

  async complete(prompt: string, options?: AIRequestOptions): Promise<AIResult> {
    const result = await generateText({
      model: this.model,
      prompt,
      temperature: options?.temperature,
      maxTokens: options?.maxTokens,
    });

    return {
      content: result.text,
      model: result.response?.modelId,
      usage: result.usage ? {
        promptTokens: result.usage.promptTokens,
        completionTokens: result.usage.completionTokens,
        totalTokens: result.usage.totalTokens,
      } : undefined,
    };
  }

  async *streamChat(
    messages: ModelMessage[],
    options?: AIRequestOptions,
  ): AsyncIterable<TextStreamPart<ToolSet>> {
    const result = streamText({
      model: this.model,
      messages,
      temperature: options?.temperature,
      maxTokens: options?.maxTokens,
    });

    for await (const part of result.fullStream) {
      yield part as TextStreamPart<ToolSet>;
    }
  }

  async embed(input: string | string[]): Promise<number[][]> {
    // Vercel AI SDK uses a separate EmbeddingModel — not supported via this adapter.
    throw new Error(
      '[VercelLLMAdapter] Embeddings require a dedicated EmbeddingModel. ' +
      'Configure an embedding adapter instead.',
    );
  }

  async listModels(): Promise<string[]> {
    // Model listing is provider-specific and not available through the base SDK.
    return [];
  }
}

/**
 * Configuration for the Vercel LLM adapter.
 */
export interface VercelLLMAdapterConfig {
  /**
   * A Vercel AI SDK–compatible language model instance.
   *
   * @example `openai('gpt-4o')` or `anthropic('claude-sonnet-4-20250514')`
   */
  model: LanguageModelV3;
}
````
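
A quick consumption sketch against the adapter surface above. The provider import and model id are assumptions (any Vercel AI SDK-compatible provider works); the `text-delta` handling mirrors what the stream encoder in this commit turns into `0:` frames.

```typescript
import { openai } from '@ai-sdk/openai';
import { VercelLLMAdapter } from '@objectstack/service-ai';

const adapter = new VercelLLMAdapter({ model: openai('gpt-4o') });

// Non-streaming: one round trip, full AIResult back.
const result = await adapter.chat(
  [{ role: 'user', content: 'Summarize the Data Stream Protocol in one line.' }],
  { temperature: 0.2, maxTokens: 128 },
);
console.log(result.content, result.usage);

// Streaming: write text deltas to stdout as they arrive.
for await (const part of adapter.streamChat([{ role: 'user', content: 'Hello!' }])) {
  if (part.type === 'text-delta') process.stdout.write(part.text);
}
```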

packages/services/service-ai/src/index.ts

Lines changed: 5 additions & 0 deletions

```diff
@@ -10,8 +10,13 @@ export type { AIServicePluginOptions } from './plugin.js';
 
 // Adapters
 export { MemoryLLMAdapter } from './adapters/memory-adapter.js';
+export { VercelLLMAdapter } from './adapters/vercel-adapter.js';
+export type { VercelLLMAdapterConfig } from './adapters/vercel-adapter.js';
 export type { LLMAdapter } from '@objectstack/spec/contracts';
 
+// Vercel Data Stream encoder
+export { encodeStreamPart, encodeVercelDataStream } from './stream/vercel-stream-encoder.js';
+
 // Conversation
 export { InMemoryConversationService } from './conversation/in-memory-conversation-service.js';
 export { ObjectQLConversationService } from './conversation/objectql-conversation-service.js';
```
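
With these re-exports in place, consumers can pull the adapter and both encoder helpers from the package root in one statement (a minimal sketch):

```typescript
import {
  VercelLLMAdapter,
  encodeStreamPart,
  encodeVercelDataStream,
} from '@objectstack/service-ai';
```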

packages/services/service-ai/src/routes/ai-routes.ts

Lines changed: 68 additions & 8 deletions

```diff
@@ -63,6 +63,14 @@ export interface RouteResponse {
   stream?: boolean;
   /** Async iterable of SSE events (when stream=true) */
   events?: AsyncIterable<unknown>;
+  /**
+   * When `true`, the HTTP server layer should encode the `events` iterable
+   * using the Vercel AI Data Stream Protocol frame format (`0:`, `9:`, `d:`, …)
+   * instead of generic SSE `data:` lines.
+   *
+   * @see https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol
+   */
+  vercelDataStream?: boolean;
 }
 
 /** Valid message roles accepted by the AI routes. */
```
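
As a sketch of what honoring this flag could look like in an HTTP server layer: the `node:http` glue below is hypothetical (not part of this commit), and the `x-vercel-ai-data-stream: v1` response header plus the `Content-Type` follow the protocol documentation linked above; treat both as assumptions.

```typescript
import type { ServerResponse } from 'node:http';
import type { TextStreamPart, ToolSet } from 'ai';
import { encodeVercelDataStream } from '@objectstack/service-ai';

// Structural stand-in for the RouteResponse shape used by the route handlers.
interface RouteResponseLike {
  status: number;
  body?: unknown;
  stream?: boolean;
  vercelDataStream?: boolean;
  events?: AsyncIterable<unknown>;
}

// Hypothetical server glue: write a handler's RouteResponse out over HTTP.
async function writeRouteResponse(res: ServerResponse, route: RouteResponseLike): Promise<void> {
  if (route.stream && route.vercelDataStream && route.events) {
    res.writeHead(route.status, {
      'Content-Type': 'text/plain; charset=utf-8',
      'x-vercel-ai-data-stream': 'v1', // header named in the linked protocol docs
    });
    // Each encoded frame is already a complete `<code>:<JSON>\n` line.
    for await (const frame of encodeVercelDataStream(
      route.events as AsyncIterable<TextStreamPart<ToolSet>>,
    )) {
      res.write(frame);
    }
    res.end();
    return;
  }
  // Non-streaming: plain JSON body.
  res.writeHead(route.status, { 'Content-Type': 'application/json' });
  res.end(JSON.stringify(route.body ?? {}));
}
```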

```diff
@@ -71,6 +79,9 @@ const VALID_ROLES = new Set<string>(['system', 'user', 'assistant', 'tool']);
 /**
  * Validate that `raw` is a well-formed message.
  * Returns null on success, or an error string on failure.
+ *
+ * Accepts both simple string content (legacy) and Vercel AI SDK array content
+ * (e.g. `[{ type: 'text', text: '...' }]`).
  */
 function validateMessage(raw: unknown): string | null {
   if (typeof raw !== 'object' || raw === null) {
```

```diff
@@ -80,8 +91,9 @@ function validateMessage(raw: unknown): string | null {
   if (typeof msg.role !== 'string' || !VALID_ROLES.has(msg.role)) {
     return `message.role must be one of ${[...VALID_ROLES].map(r => `"${r}"`).join(', ')}`;
   }
-  if (typeof msg.content !== 'string') {
-    return 'message.content must be a string';
+  // Accept string content (legacy) or array content (Vercel multi-part)
+  if (typeof msg.content !== 'string' && !Array.isArray(msg.content)) {
+    return 'message.content must be a string or an array';
   }
   return null;
 }
```
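
Concretely, both of the following shapes now pass `validateMessage`; the second uses the multi-part form named in the doc comment above.

```typescript
// Legacy string content: accepted before and after this change.
const legacy = { role: 'user', content: 'Hello' };

// Vercel AI SDK multi-part array content: newly accepted.
const multiPart = { role: 'user', content: [{ type: 'text', text: 'Hello' }] };

// validateMessage(legacy)    → null (valid)
// validateMessage(multiPart) → null (valid)
// validateMessage({ role: 'user', content: 42 })
//                            → 'message.content must be a string or an array'
```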

```diff
@@ -112,18 +124,26 @@ export function buildAIRoutes(
 ): RouteDefinition[] {
   return [
     // ── Chat ────────────────────────────────────────────────────
+    //
+    // Dual-mode endpoint compatible with both the legacy ObjectStack
+    // format (`{ messages, options }`) and the Vercel AI SDK useChat
+    // flat format (`{ messages, system, model, stream, … }`).
+    //
+    // Behaviour:
+    //   • `stream !== false` → Vercel Data Stream Protocol (SSE)
+    //   • `stream === false` → JSON response (legacy)
+    //
     {
       method: 'POST',
       path: '/api/v1/ai/chat',
-      description: 'Synchronous chat completion',
+      description: 'Chat completion (supports Vercel AI Data Stream Protocol)',
       auth: true,
       permissions: ['ai:chat'],
       handler: async (req) => {
-        const { messages, options } = (req.body ?? {}) as {
-          messages?: unknown[];
-          options?: Record<string, unknown>;
-        };
+        const body = (req.body ?? {}) as Record<string, unknown>;
 
+        // ── Parse messages ───────────────────────────────────
+        const messages = body.messages as unknown[] | undefined;
         if (!Array.isArray(messages) || messages.length === 0) {
           return { status: 400, body: { error: 'messages array is required' } };
         }
```

```diff
@@ -133,8 +153,48 @@
           if (err) return { status: 400, body: { error: err } };
         }
 
+        // ── Resolve options ──────────────────────────────────
+        // Accept legacy nested `options` object **or** Vercel-style
+        // flat fields (`model`, `temperature`, `maxTokens`).
+        const nested = (body.options ?? {}) as Record<string, unknown>;
+        const resolvedOptions: Record<string, unknown> = {
+          ...nested,
+          ...(body.model != null && { model: body.model }),
+          ...(body.temperature != null && { temperature: body.temperature }),
+          ...(body.maxTokens != null && { maxTokens: body.maxTokens }),
+        };
+
+        // ── Prepend system prompt ────────────────────────────
+        // Vercel useChat sends `system` (or the deprecated `systemPrompt`)
+        // as a top-level field. We prepend it as a system message.
+        const systemPrompt = (body.system ?? body.systemPrompt) as string | undefined;
+        const finalMessages: ModelMessage[] = [
+          ...(systemPrompt
+            ? [{ role: 'system' as const, content: systemPrompt }]
+            : []),
+          ...(messages as ModelMessage[]),
+        ];
+
+        // ── Choose response mode ─────────────────────────────
+        const wantStream = body.stream !== false;
+
+        if (wantStream) {
+          // Vercel Data Stream Protocol (SSE)
+          try {
+            if (!aiService.streamChat) {
+              return { status: 501, body: { error: 'Streaming is not supported by the configured AI service' } };
+            }
+            const events = aiService.streamChat(finalMessages, resolvedOptions as any);
+            return { status: 200, stream: true, vercelDataStream: true, events };
+          } catch (err) {
+            logger.error('[AI Route] /chat stream error', err instanceof Error ? err : undefined);
+            return { status: 500, body: { error: 'Internal AI service error' } };
+          }
+        }
+
+        // JSON response (non-streaming)
         try {
-          const result = await aiService.chat(messages as ModelMessage[], options as any);
+          const result = await aiService.chat(finalMessages, resolvedOptions as any);
           return { status: 200, body: result };
         } catch (err) {
           logger.error('[AI Route] /chat error', err instanceof Error ? err : undefined);
```
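
Two request sketches against the finished handler. The endpoint path and field names come straight from this diff; the `fetch` wiring itself is illustrative.

```typescript
// Legacy ObjectStack shape: nested `options`, explicit non-streaming → JSON AIResult.
await fetch('/api/v1/ai/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    messages: [{ role: 'user', content: 'Hello' }],
    options: { temperature: 0.7 },
    stream: false,
  }),
});

// Vercel useChat flat shape: top-level fields, streams by default
// (no `stream: false`) → Vercel Data Stream response.
await fetch('/api/v1/ai/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    system: 'You are terse.',
    model: 'gpt-4o',
    temperature: 0.2,
    maxTokens: 256,
    messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
  }),
});
```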

packages/services/service-ai/src/stream/index.ts (new file)
Lines changed: 3 additions & 0 deletions

```typescript
// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license.

export { encodeStreamPart, encodeVercelDataStream } from './vercel-stream-encoder.js';
```

packages/services/service-ai/src/stream/vercel-stream-encoder.ts (new file)
Lines changed: 102 additions & 0 deletions

```typescript
// Copyright (c) 2025 ObjectStack. Licensed under the Apache-2.0 license.

/**
 * Vercel Data Stream Encoder
 *
 * Converts `AsyncIterable<TextStreamPart<ToolSet>>` (the internal ObjectStack
 * streaming format, aligned with Vercel AI SDK types) into the Vercel AI SDK
 * **Data Stream Protocol** wire format.
 *
 * Each frame is a single line: `<type-code>:<JSON>\n`
 *
 * | Code | Description            | Payload shape                              |
 * |:-----|:-----------------------|:-------------------------------------------|
 * | `0`  | Text delta             | `"<text>"`                                 |
 * | `9`  | Tool call (full)       | `{"toolCallId","toolName","args"}`         |
 * | `b`  | Tool call start        | `{"toolCallId","toolName"}`                |
 * | `c`  | Tool call delta        | `{"toolCallId","argsTextDelta"}`           |
 * | `a`  | Tool result            | `{"toolCallId","result"}`                  |
 * | `d`  | Finish (message-level) | `{"finishReason","usage"?}`                |
 * | `e`  | Step finish            | `{"finishReason","usage"?,"isContinued"?}` |
 *
 * @see https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol
 */

import type { TextStreamPart, ToolSet } from 'ai';

// ── Public API ──────────────────────────────────────────────────────

/**
 * Encode a single `TextStreamPart` event into its Vercel Data Stream frame(s).
 *
 * Returns an empty string for event types that have no wire-format mapping
 * (e.g. internal-only events).
 */
export function encodeStreamPart(part: TextStreamPart<ToolSet>): string {
  switch (part.type) {
    // ── Text ──────────────────────────────────────────────────
    case 'text-delta':
      return `0:${JSON.stringify(part.text)}\n`;

    // ── Tool calling ─────────────────────────────────────────
    case 'tool-call':
      return `9:${JSON.stringify({
        toolCallId: part.toolCallId,
        toolName: part.toolName,
        args: part.input,
      })}\n`;

    case 'tool-call-streaming-start':
      return `b:${JSON.stringify({
        toolCallId: part.toolCallId,
        toolName: part.toolName,
      })}\n`;

    case 'tool-call-delta':
      return `c:${JSON.stringify({
        toolCallId: part.toolCallId,
        argsTextDelta: part.argsTextDelta,
      })}\n`;

    case 'tool-result':
      return `a:${JSON.stringify({
        toolCallId: part.toolCallId,
        result: part.result,
      })}\n`;

    // ── Finish / Step ────────────────────────────────────────
    case 'finish':
      return `d:${JSON.stringify({
        finishReason: part.finishReason,
        usage: part.totalUsage ?? undefined,
      })}\n`;

    case 'step-finish':
      return `e:${JSON.stringify({
        finishReason: part.finishReason,
        usage: part.totalUsage ?? undefined,
        isContinued: part.isContinued ?? false,
      })}\n`;

    // ── Unhandled types (silently skip) ──────────────────────
    default:
      return '';
  }
}

/**
 * Transform an `AsyncIterable<TextStreamPart>` into an `AsyncIterable<string>`
 * where each yielded string is a Vercel Data Stream frame.
 *
 * Empty frames (from unmapped event types) are silently dropped.
 */
export async function* encodeVercelDataStream(
  events: AsyncIterable<TextStreamPart<ToolSet>>,
): AsyncIterable<string> {
  for await (const part of events) {
    const frame = encodeStreamPart(part);
    if (frame) {
      yield frame;
    }
  }
}
```
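
A small worked example of the frame format: two hand-built parts pushed through `encodeStreamPart`. The part literals and the `totalUsage` shape are illustrative; they match only the fields the cases above actually read.

```typescript
import { encodeStreamPart } from '@objectstack/service-ai';
import type { TextStreamPart, ToolSet } from 'ai';

// Text delta → `0:` frame.
const delta = { type: 'text-delta', text: 'Hel' } as unknown as TextStreamPart<ToolSet>;
process.stdout.write(encodeStreamPart(delta));
// wire: 0:"Hel"

// Message-level finish → `d:` frame.
const finish = {
  type: 'finish',
  finishReason: 'stop',
  totalUsage: { totalTokens: 15 },
} as unknown as TextStreamPart<ToolSet>;
process.stdout.write(encodeStreamPart(finish));
// wire: d:{"finishReason":"stop","usage":{"totalTokens":15}}
```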
