11import { sew } from "@/actions" ;
2- import { _getConfiguredLanguageModelsFull , _getAISDKLanguageModelAndOptions , updateChatMessages } from "@/features/chat/actions" ;
3- import { runAgentBlocking } from "@/features/chat/agent" ;
4- import { ANSWER_TAG } from "@/features/chat/constants" ;
5- import { LanguageModelInfo , SBChatMessage , Source } from "@/features/chat/types" ;
6- import { convertLLMOutputToPortableMarkdown , getLanguageModelKey } from "@/features/chat/utils" ;
2+ import { _getConfiguredLanguageModelsFull , _getAISDKLanguageModelAndOptions , updateChatMessages , generateAndUpdateChatNameFromMessage } from "@/features/chat/actions" ;
3+ import { SBChatMessage , Source } from "@/features/chat/types" ;
4+ import { convertLLMOutputToPortableMarkdown , getAnswerPartFromAssistantMessage } from "@/features/chat/utils" ;
75import { ErrorCode } from "@/lib/errorCodes" ;
86import { requestBodySchemaValidationError , ServiceError , serviceErrorResponse } from "@/lib/serviceError" ;
97import { isServiceError } from "@/lib/utils" ;
@@ -16,6 +14,8 @@ import { StatusCodes } from "http-status-codes";
1614import { headers } from "next/headers" ;
1715import { NextResponse } from "next/server" ;
1816import { z } from "zod" ;
17+ import { createMessageStream } from "../route" ;
18+ import { InferUIMessageChunk , UITools , UIDataTypes , UIMessage } from "ai" ;
1919
// Logger scoped to this blocking chat API route.
const logger = createLogger('chat-blocking-api');
2121
@@ -26,36 +26,15 @@ const logger = createLogger('chat-blocking-api');
2626const blockingChatRequestSchema = z . object ( {
2727 // The question to ask about the codebase
2828 question : z . string ( ) . min ( 1 , "Question is required" ) ,
29- // Optional: filter to specific repositories (by name)
30- repos : z . array ( z . string ( ) ) . optional ( ) ,
31- // Optional: specify a language model (defaults to first configured model)
32- languageModel : z . object ( {
33- provider : z . string ( ) ,
34- model : z . string ( ) ,
35- displayName : z . string ( ) . optional ( ) ,
36- } ) . optional ( ) ,
3729} ) ;
3830
/**
 * Response schema for the blocking chat API.
 */
interface BlockingChatResponse {
    // The agent's final answer, converted to portable markdown
    answer: string;
    // ID of the persisted chat session
    chatId: string;
    // Absolute URL to view the chat in the web UI
    chatUrl: string;
}
6039
6140/**
@@ -70,12 +49,12 @@ interface BlockingChatResponse {
7049export async function POST ( request : Request ) {
7150 const requestBody = await request . json ( ) ;
7251 const parsed = await blockingChatRequestSchema . safeParseAsync ( requestBody ) ;
73-
52+
7453 if ( ! parsed . success ) {
7554 return serviceErrorResponse ( requestBodySchemaValidationError ( parsed . error ) ) ;
7655 }
7756
78- const { question, repos , languageModel : requestedLanguageModel } = parsed . data ;
57+ const { question } = parsed . data ;
7958
8059 const response : BlockingChatResponse | ServiceError = await sew ( ( ) =>
8160 withOptionalAuthV2 ( async ( { org, user, prisma } ) => {
@@ -89,64 +68,13 @@ export async function POST(request: Request) {
8968 } satisfies ServiceError ;
9069 }
9170
92- // Select the language model to use
93- let languageModelConfig = configuredModels [ 0 ] ; // Default to first configured model
94-
95- if ( requestedLanguageModel ) {
96- const requested = requestedLanguageModel as LanguageModelInfo ;
97- const found = configuredModels . find (
98- ( model ) => getLanguageModelKey ( model ) === getLanguageModelKey ( requested )
99- ) ;
100- if ( ! found ) {
101- return {
102- statusCode : StatusCodes . BAD_REQUEST ,
103- errorCode : ErrorCode . INVALID_REQUEST_BODY ,
104- message : `Language model ${ requested . model } is not configured.` ,
105- } satisfies ServiceError ;
106- }
107- languageModelConfig = found ;
108- }
71+ // @todo : we should probably have a option of passing the language model
72+ // into the request body. For now, just use the first configured model.
73+ const languageModelConfig = configuredModels [ 0 ] ;
10974
110-
11175 const { model, providerOptions } = await _getAISDKLanguageModelAndOptions ( languageModelConfig ) ;
11276 const modelName = languageModelConfig . displayName ?? languageModelConfig . model ;
11377
114- // Determine which repos to search
115- let searchScopeRepoNames : string [ ] ;
116-
117- if ( repos && repos . length > 0 ) {
118- // Use the provided repos filter
119- // Validate that these repos exist and the user has access
120- const validRepos = await prisma . repo . findMany ( {
121- where : {
122- orgId : org . id ,
123- name : {
124- in : repos ,
125- } ,
126- } ,
127- select : { name : true } ,
128- } ) ;
129-
130- searchScopeRepoNames = validRepos . map ( r => r . name ) ;
131-
132- if ( searchScopeRepoNames . length === 0 ) {
133- return {
134- statusCode : StatusCodes . BAD_REQUEST ,
135- errorCode : ErrorCode . INVALID_REQUEST_BODY ,
136- message : "None of the specified repositories were found or accessible." ,
137- } satisfies ServiceError ;
138- }
139- } else {
140- // Search all repos the user has access to
141- const allRepos = await prisma . repo . findMany ( {
142- where : {
143- orgId : org . id ,
144- } ,
145- select : { name : true } ,
146- } ) ;
147- searchScopeRepoNames = allRepos . map ( r => r . name ) ;
148- }
149-
15078 // Create a new chat session
15179 const chat = await prisma . chat . create ( {
15280 data : {
@@ -157,92 +85,76 @@ export async function POST(request: Request) {
15785 } ,
15886 } ) ;
15987
160- const traceId = randomUUID ( ) ;
161-
16288 // Run the agent to completion
163- logger . info ( `Starting blocking agent for chat ${ chat . id } ` , {
89+ logger . debug ( `Starting blocking agent for chat ${ chat . id } ` , {
16490 chatId : chat . id ,
16591 question : question . substring ( 0 , 100 ) ,
166- repoCount : searchScopeRepoNames . length ,
16792 model : modelName ,
16893 } ) ;
16994
170- const agentResult = await runAgentBlocking ( {
171- model,
172- providerOptions,
173- searchScopeRepoNames,
174- inputMessages : [ { role : 'user' , content : question } ] ,
175- inputSources : [ ] ,
176- traceId,
177- } ) ;
178-
179- // Extract the answer (removing the answer tag if present)
180- let answer = agentResult . text ;
181- if ( answer . startsWith ( ANSWER_TAG ) ) {
182- answer = answer . slice ( ANSWER_TAG . length ) . trim ( ) ;
183- }
184-
185- // Convert to portable markdown (replaces @file: references with markdown links)
186- const portableAnswer = convertLLMOutputToPortableMarkdown ( answer ) ;
187-
188- // Build the chat URL
189- const headersList = await headers ( ) ;
190- const baseUrl = getBaseUrl ( headersList ) ;
191- const chatUrl = `${ baseUrl } /${ org . domain } /chat/${ chat . id } ` ;
192-
193- // Create the message history for persistence
95+ // Create the initial user message
19496 const userMessage : SBChatMessage = {
19597 id : randomUUID ( ) ,
19698 role : 'user' ,
19799 parts : [ { type : 'text' , text : question } ] ,
198100 } ;
199101
200- const assistantMessage : SBChatMessage = {
201- id : randomUUID ( ) ,
202- role : 'assistant' ,
203- parts : [
204- { type : 'text' , text : agentResult . text } ,
205- // Include sources as data parts
206- ...agentResult . sources . map ( ( source ) => ( {
207- type : 'data-source' as const ,
208- data : source ,
209- } ) ) ,
210- ] ,
211- metadata : {
212- totalTokens : agentResult . usage . totalTokens ,
213- totalInputTokens : agentResult . usage . inputTokens ,
214- totalOutputTokens : agentResult . usage . outputTokens ,
215- totalResponseTimeMs : agentResult . responseTimeMs ,
216- modelName,
217- traceId,
102+ // We'll capture the final messages and usage from the stream
103+ let finalMessages : SBChatMessage [ ] = [ ] ;
104+
105+ const stream = await createMessageStream ( {
106+ messages : [ userMessage ] ,
107+ selectedSearchScopes : [ ] ,
108+ model,
109+ modelName,
110+ modelProviderOptions : providerOptions ,
111+ orgId : org . id ,
112+ prisma,
113+ onFinish : async ( { messages } ) => {
114+ finalMessages = messages ;
218115 } ,
219- } ;
116+ } )
117+
118+ await Promise . all ( [
119+ // Consume the stream fully to trigger onFinish
120+ blockStreamUntilFinish ( stream ) ,
121+ // Generate and update the chat name
122+ generateAndUpdateChatNameFromMessage ( {
123+ chatId : chat . id ,
124+ languageModelId : languageModelConfig . model ,
125+ message : question ,
126+ } )
127+ ] ) ;
220128
221129 // Persist the messages to the chat
222130 await updateChatMessages ( {
223131 chatId : chat . id ,
224- messages : [ userMessage , assistantMessage ] ,
132+ messages : finalMessages ,
225133 } ) ;
226134
227- logger . info ( `Completed blocking agent for chat ${ chat . id } ` , {
135+ // Extract the answer text from the assistant message
136+ const assistantMessage = finalMessages . find ( m => m . role === 'assistant' ) ;
137+ const answerPart = assistantMessage
138+ ? getAnswerPartFromAssistantMessage ( assistantMessage , false )
139+ : undefined ;
140+ const answerText = answerPart ?. text ?? '' ;
141+
142+ // Convert to portable markdown (replaces @file: references with markdown links)
143+ const portableAnswer = convertLLMOutputToPortableMarkdown ( answerText ) ;
144+
145+ // Build the chat URL
146+ const headersList = await headers ( ) ;
147+ const baseUrl = getBaseUrl ( headersList ) ;
148+ const chatUrl = `${ baseUrl } /${ org . domain } /chat/${ chat . id } ` ;
149+
150+ logger . debug ( `Completed blocking agent for chat ${ chat . id } ` , {
228151 chatId : chat . id ,
229- responseTimeMs : agentResult . responseTimeMs ,
230- totalTokens : agentResult . usage . totalTokens ,
231- sourceCount : agentResult . sources . length ,
232152 } ) ;
233153
234154 return {
235155 answer : portableAnswer ,
236156 chatId : chat . id ,
237157 chatUrl,
238- sources : agentResult . sources ,
239- metadata : {
240- totalTokens : agentResult . usage . totalTokens ,
241- inputTokens : agentResult . usage . inputTokens ,
242- outputTokens : agentResult . usage . outputTokens ,
243- totalResponseTimeMs : agentResult . responseTimeMs ,
244- modelName,
245- } ,
246158 } satisfies BlockingChatResponse ;
247159 } )
248160 ) ;
@@ -251,7 +163,13 @@ export async function POST(request: Request) {
251163 return serviceErrorResponse ( response ) ;
252164 }
253165
254- console . log ( response ) ;
255-
256166 return NextResponse . json ( response ) ;
257167}
168+
169+ const blockStreamUntilFinish = async < T extends UIMessage < unknown , UIDataTypes , UITools > > ( stream : ReadableStream < InferUIMessageChunk < T > > ) => {
170+ const reader = stream . getReader ( ) ;
171+ while ( true as const ) {
172+ const { done } = await reader . read ( ) ;
173+ if ( done ) break ;
174+ }
175+ }
0 commit comments