@@ -63,6 +63,14 @@ export interface RouteResponse {
   stream?: boolean;
   /** Async iterable of SSE events (when stream=true) */
   events?: AsyncIterable<unknown>;
+  /**
+   * When `true`, the HTTP server layer should encode the `events` iterable
+   * using the Vercel AI Data Stream Protocol frame format (`0:`, `9:`, `d:`, …)
+   * instead of generic SSE `data:` lines.
+   *
+   * @see https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol
+   */
+  vercelDataStream?: boolean;
 }
 
 /** Valid message roles accepted by the AI routes. */
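To make the new flag concrete, here is a minimal sketch of how an HTTP server layer might branch on `vercelDataStream` while draining the `events` iterable. The `writeEvents` helper, the response shape, and the event fields (`type`, `text`) are illustrative assumptions, not part of this change; only the `0:`/`d:` framing follows the protocol referenced in the doc comment.

```ts
// Illustrative sketch only — helper name and event shape are assumptions.
async function writeEvents(
  res: { write(chunk: string): void; end(): void },
  events: AsyncIterable<unknown>,
  vercelDataStream: boolean,
): Promise<void> {
  for await (const ev of events) {
    if (vercelDataStream) {
      // Data Stream Protocol text part: `0:` followed by a JSON-encoded string
      const e = ev as { type?: string; text?: string };
      if (e.type === 'text-delta' && typeof e.text === 'string') {
        res.write(`0:${JSON.stringify(e.text)}\n`);
      }
    } else {
      // Generic SSE framing
      res.write(`data: ${JSON.stringify(ev)}\n\n`);
    }
  }
  if (vercelDataStream) {
    // Finish-message part: `d:` followed by a JSON payload
    res.write(`d:${JSON.stringify({ finishReason: 'stop' })}\n`);
  }
  res.end();
}
```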
@@ -71,6 +79,9 @@ const VALID_ROLES = new Set<string>(['system', 'user', 'assistant', 'tool']);
 /**
  * Validate that `raw` is a well-formed message.
  * Returns null on success, or an error string on failure.
+ *
+ * Accepts both simple string content (legacy) and Vercel AI SDK array content
+ * (e.g. `[{ type: 'text', text: '...' }]`).
  */
 function validateMessage(raw: unknown): string | null {
   if (typeof raw !== 'object' || raw === null) {
@@ -80,8 +91,9 @@ function validateMessage(raw: unknown): string | null {
   if (typeof msg.role !== 'string' || !VALID_ROLES.has(msg.role)) {
     return `message.role must be one of ${[...VALID_ROLES].map(r => `"${r}"`).join(', ')}`;
   }
-  if (typeof msg.content !== 'string') {
-    return 'message.content must be a string';
+  // Accept string content (legacy) or array content (Vercel multi-part)
+  if (typeof msg.content !== 'string' && !Array.isArray(msg.content)) {
+    return 'message.content must be a string or an array';
   }
   return null;
 }
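For reference, the relaxed check now admits both content shapes shown below. The multi-part example follows the Vercel AI SDK convention cited in the doc comment; note that the individual part types are not validated here, only that `content` is an array.

```ts
// Passes: legacy string content
validateMessage({ role: 'user', content: 'What changed in v2?' });

// Passes: Vercel-style multi-part array content
validateMessage({
  role: 'user',
  content: [{ type: 'text', text: 'Summarise the attached notes.' }],
});

// Fails: neither a string nor an array
validateMessage({ role: 'user', content: 42 });
// → 'message.content must be a string or an array'
```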
@@ -112,18 +124,26 @@ export function buildAIRoutes(
 ): RouteDefinition[] {
   return [
     // ── Chat ────────────────────────────────────────────────────
+    //
+    // Dual-mode endpoint compatible with both the legacy ObjectStack
+    // format (`{ messages, options }`) and the Vercel AI SDK useChat
+    // flat format (`{ messages, system, model, stream, … }`).
+    //
+    // Behaviour:
+    //   • `stream !== false` → Vercel Data Stream Protocol (SSE)
+    //   • `stream === false` → JSON response (legacy)
+    //
     {
       method: 'POST',
       path: '/api/v1/ai/chat',
-      description: 'Synchronous chat completion',
+      description: 'Chat completion (supports Vercel AI Data Stream Protocol)',
       auth: true,
       permissions: ['ai:chat'],
       handler: async (req) => {
-        const { messages, options } = (req.body ?? {}) as {
-          messages?: unknown[];
-          options?: Record<string, unknown>;
-        };
+        const body = (req.body ?? {}) as Record<string, unknown>;
 
+        // ── Parse messages ───────────────────────────────────
+        const messages = body.messages as unknown[] | undefined;
         if (!Array.isArray(messages) || messages.length === 0) {
          return { status: 400, body: { error: 'messages array is required' } };
         }
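The two request bodies below illustrate the dual-mode contract described in the route comment above; the model name and temperature are placeholder values, not defaults defined by this change.

```ts
// Legacy ObjectStack format: options nested, explicit stream: false → JSON body
const legacyBody = {
  messages: [{ role: 'user', content: 'Hello' }],
  options: { model: 'gpt-4o-mini', temperature: 0.2 },
  stream: false,
};

// Vercel useChat flat format: top-level fields, stream omitted → data stream
const vercelBody = {
  messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
  system: 'You are a helpful assistant.',
  model: 'gpt-4o-mini',
  temperature: 0.2,
};
```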
@@ -133,8 +153,48 @@ export function buildAIRoutes(
           if (err) return { status: 400, body: { error: err } };
         }
 
+        // ── Resolve options ──────────────────────────────────
+        // Accept legacy nested `options` object **or** Vercel-style
+        // flat fields (`model`, `temperature`, `maxTokens`).
+        const nested = (body.options ?? {}) as Record<string, unknown>;
+        const resolvedOptions: Record<string, unknown> = {
+          ...nested,
+          ...(body.model != null && { model: body.model }),
+          ...(body.temperature != null && { temperature: body.temperature }),
+          ...(body.maxTokens != null && { maxTokens: body.maxTokens }),
+        };
+
+        // ── Prepend system prompt ────────────────────────────
+        // Vercel useChat sends `system` (or the deprecated `systemPrompt`)
+        // as a top-level field. We prepend it as a system message.
+        const systemPrompt = (body.system ?? body.systemPrompt) as string | undefined;
+        const finalMessages: ModelMessage[] = [
+          ...(systemPrompt
+            ? [{ role: 'system' as const, content: systemPrompt }]
+            : []),
+          ...(messages as ModelMessage[]),
+        ];
+
+        // ── Choose response mode ─────────────────────────────
+        const wantStream = body.stream !== false;
+
+        if (wantStream) {
+          // Vercel Data Stream Protocol (SSE)
+          try {
+            if (!aiService.streamChat) {
+              return { status: 501, body: { error: 'Streaming is not supported by the configured AI service' } };
+            }
+            const events = aiService.streamChat(finalMessages, resolvedOptions as any);
+            return { status: 200, stream: true, vercelDataStream: true, events };
+          } catch (err) {
+            logger.error('[AI Route] /chat stream error', err instanceof Error ? err : undefined);
+            return { status: 500, body: { error: 'Internal AI service error' } };
+          }
+        }
+
+        // JSON response (non-streaming)
         try {
-          const result = await aiService.chat(messages as ModelMessage[], options as any);
+          const result = await aiService.chat(finalMessages, resolvedOptions as any);
           return { status: 200, body: result };
         } catch (err) {
           logger.error('[AI Route] /chat error', err instanceof Error ? err : undefined);
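Finally, a client-side sketch of the two response modes the rewritten handler exposes. The host, route prefix origin, and bearer token are placeholders, not values defined by this change.

```ts
// Placeholders: host and token are hypothetical.
const baseHeaders = {
  'Content-Type': 'application/json',
  Authorization: 'Bearer <token>',
};

// Non-streaming: explicit `stream: false` yields a single JSON body.
const jsonRes = await fetch('http://localhost:3000/api/v1/ai/chat', {
  method: 'POST',
  headers: baseHeaders,
  body: JSON.stringify({
    messages: [{ role: 'user', content: 'Summarise the release notes.' }],
    stream: false,
  }),
});
const result = await jsonRes.json();

// Streaming (the default when `stream` is omitted): with the Vercel AI SDK
// on the client, pointing useChat at this route is typically sufficient, e.g.
//   useChat({ api: '/api/v1/ai/chat' })
```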