@@ -37,180 +37,238 @@ class AIHandler {
3737 // 使用 Map 管理多个并行请求的 AbortController
3838 private abortControllers = new Map < string , AbortController > ( )
3939
40- public async sendMessageStreaming (
40+ /**
41+ * 准备 API 消息数组,处理 systemPrompt
42+ */
43+ private prepareApiMessages ( messages : ChatMessage [ ] , modelConfig : ModelConfig ) : ChatMessage [ ] {
44+ const apiMessages = messages . map ( ( msg ) => ( {
45+ role : msg . role ,
46+ content : msg . content
47+ } ) )
48+
49+ // 如果有systemPrompt且第一条消息不是system消息,则插入system消息
50+ if (
51+ modelConfig . systemPrompt &&
52+ ( apiMessages . length === 0 || apiMessages [ 0 ] . role !== 'system' )
53+ ) {
54+ apiMessages . unshift ( {
55+ role : 'system' ,
56+ content : modelConfig . systemPrompt
57+ } )
58+ }
59+
60+ return apiMessages
61+ }
62+
63+ /**
64+ * 获取或创建默认的 ModelConfig
65+ */
66+ private getModelConfig ( modelConfig ?: ModelConfig ) : ModelConfig {
67+ return (
68+ modelConfig || {
69+ systemPrompt : '' ,
70+ topP : 1 ,
71+ temperature : 1
72+ }
73+ )
74+ }
75+
76+ /**
77+ * 发送错误消息给渲染进程
78+ */
79+ private sendError ( event : Electron . IpcMainInvokeEvent , requestId : string , error : string ) : void {
80+ event . sender . send ( 'ai-stream-data' , {
81+ requestId,
82+ type : 'error' ,
83+ error
84+ } as AIStreamChunk )
85+ }
86+
87+ /**
88+ * 发送完成消息给渲染进程
89+ */
90+ private sendComplete (
4191 event : Electron . IpcMainInvokeEvent ,
42- request : AIRequest
92+ requestId : string ,
93+ content : string ,
94+ reasoning_content ?: string
95+ ) : void {
96+ event . sender . send ( 'ai-stream-data' , {
97+ requestId,
98+ type : 'complete' ,
99+ content,
100+ reasoning_content : reasoning_content || undefined
101+ } as AIStreamChunk )
102+ }
103+
104+ /**
105+ * 创建并发送 API 请求
106+ */
107+ private async fetchStreamingResponse (
108+ request : AIRequest ,
109+ apiMessages : ChatMessage [ ] ,
110+ modelConfig : ModelConfig ,
111+ abortController : AbortController
112+ ) : Promise < Response > {
113+ return await fetch ( `${ request . llmConfig . apiHost . replace ( / \/ $ / , '' ) } /chat/completions` , {
114+ method : 'POST' ,
115+ headers : {
116+ 'Content-Type' : 'application/json' ,
117+ Authorization : `Bearer ${ request . llmConfig . apiKey } `
118+ } ,
119+ body : JSON . stringify ( {
120+ model : request . llmConfig . modelName ,
121+ messages : apiMessages ,
122+ temperature : modelConfig . temperature ,
123+ top_p : modelConfig . topP ,
124+ stream : true
125+ } ) ,
126+ signal : abortController . signal
127+ } )
128+ }
129+
/**
 * Create an SSE parser that forwards content / reasoning deltas to the
 * renderer as they arrive and accumulates them into the shared ref objects.
 *
 * NOTE(review): the '[DONE]' sentinel emits a 'complete' event here, and
 * processStreamResponse emits another one when the stream closes — the
 * renderer may receive 'complete' twice for one request; confirm it handles
 * that idempotently.
 */
private createStreamParser(
  event: Electron.IpcMainInvokeEvent,
  requestId: string,
  fullResponse: { value: string },
  fullReasoning: { value: string }
) {
  return createParser({
    onEvent: (eventData) => {
      if (eventData.data === '[DONE]') {
        this.sendComplete(event, requestId, fullResponse.value, fullReasoning.value)
        return
      }

      try {
        const parsed = JSON.parse(eventData.data)
        const delta = parsed.choices?.[0]?.delta
        const content = delta?.content
        // Providers disagree on where reasoning text lives; probe every
        // known location, first hit wins.
        const reasoning =
          delta?.reasoning_content ||
          delta?.reasoning ||
          parsed.reasoning_content ||
          parsed?.reasoning

        if (content) {
          fullResponse.value += content
          event.sender.send('ai-stream-data', {
            requestId,
            type: 'chunk',
            content
          } as AIStreamChunk)
        }

        if (reasoning) {
          fullReasoning.value += reasoning
          event.sender.send('ai-stream-data', {
            requestId,
            type: 'reasoning_content',
            reasoning_content: reasoning
          } as AIStreamChunk)
        }
      } catch (e) {
        // A malformed SSE payload is skipped; later events are unaffected.
        console.warn('Failed to parse streaming data:', e)
      }
    }
  })
}
180+ /**
181+ * 处理流式响应
182+ */
183+ private async processStreamResponse (
184+ reader : ReadableStreamDefaultReader < Uint8Array > ,
185+ parser : ReturnType < typeof createParser > ,
186+ event : Electron . IpcMainInvokeEvent ,
187+ requestId : string ,
188+ fullResponse : { value : string } ,
189+ fullReasoning : { value : string }
43190 ) : Promise < void > {
191+ const decoder = new TextDecoder ( )
192+
44193 try {
45- // 为当前请求创建新的AbortController
46- const abortController = new AbortController ( )
47- this . abortControllers . set ( request . requestId , abortController )
48-
49- // 准备消息数组,如果有systemPrompt,插入system消息
50- const apiMessages = request . messages . map ( ( msg ) => ( {
51- role : msg . role ,
52- content : msg . content
53- } ) )
54-
55- let modelConfig = request . modelConfig
56- if ( ! request . modelConfig ) {
57- modelConfig = {
58- systemPrompt : '' ,
59- topP : 1 ,
60- temperature : 1
61- }
194+ while ( true ) {
195+ const { done, value } = await reader . read ( )
196+ if ( done ) break
197+
198+ const chunk = decoder . decode ( value , { stream : true } )
199+ parser . feed ( chunk )
62200 }
63201
64- // 如果有systemPrompt且第一条消息不是system消息,则插入system消息
65- if (
66- modelConfig . systemPrompt &&
67- ( apiMessages . length === 0 || apiMessages [ 0 ] . role !== 'system' )
68- ) {
69- apiMessages . unshift ( {
70- role : 'system' ,
71- content : modelConfig . systemPrompt
72- } )
202+ this . sendComplete ( event , requestId , fullResponse . value , fullReasoning . value )
203+ } catch ( error ) {
204+ if ( error instanceof Error && error . name === 'AbortError' ) {
205+ this . sendComplete ( event , requestId , fullResponse . value , fullReasoning . value )
206+ } else {
207+ this . sendError (
208+ event ,
209+ requestId ,
210+ error instanceof Error ? error . message : 'Unknown error'
211+ )
73212 }
213+ }
214+ }
74215
75- const response = await fetch (
76- `${ request . llmConfig . apiHost . replace ( / \/ $ / , '' ) } /chat/completions` ,
77- {
78- method : 'POST' ,
79- headers : {
80- 'Content-Type' : 'application/json' ,
81- Authorization : `Bearer ${ request . llmConfig . apiKey } `
82- } ,
83- body : JSON . stringify ( {
84- model : request . llmConfig . modelName ,
85- messages : apiMessages ,
86- temperature : modelConfig . temperature ,
87- top_p : modelConfig . topP ,
88- stream : true
89- } ) ,
90- signal : abortController . signal
91- }
216+ public async sendMessageStreaming (
217+ event : Electron . IpcMainInvokeEvent ,
218+ request : AIRequest
219+ ) : Promise < void > {
220+ const abortController = new AbortController ( )
221+ this . abortControllers . set ( request . requestId , abortController )
222+
223+ try {
224+ const modelConfig = this . getModelConfig ( request . modelConfig )
225+ const apiMessages = this . prepareApiMessages ( request . messages , modelConfig )
226+
227+ const response = await this . fetchStreamingResponse (
228+ request ,
229+ apiMessages ,
230+ modelConfig ,
231+ abortController
92232 )
93233
94234 if ( ! response . ok ) {
95- event . sender . send ( 'ai-stream-data' , {
96- requestId : request . requestId ,
97- type : 'error' ,
98- error : `HTTP error! status: ${ response . status } `
99- } as AIStreamChunk )
235+ this . sendError ( event , request . requestId , `HTTP error! status: ${ response . status } ` )
100236 return
101237 }
102238
103239 const reader = response . body ?. getReader ( )
104240 if ( ! reader ) {
105- event . sender . send ( 'ai-stream-data' , {
106- requestId : request . requestId ,
107- type : 'error' ,
108- error : 'No response body reader available'
109- } as AIStreamChunk )
241+ this . sendError ( event , request . requestId , 'No response body reader available' )
110242 return
111243 }
112244
113- const decoder = new TextDecoder ( )
114- let fullResponse = ''
115- let fullReasoning = ''
116-
117- // 使用 eventsource-parser 来解析 SSE 数据
118- const parser = createParser ( {
119- onEvent : ( eventData ) => {
120- if ( eventData . data === '[DONE]' ) {
121- event . sender . send ( 'ai-stream-data' , {
122- requestId : request . requestId ,
123- type : 'complete' ,
124- content : fullResponse ,
125- reasoning_content : fullReasoning || undefined
126- } as AIStreamChunk )
127- return
128- }
129-
130- try {
131- const parsed = JSON . parse ( eventData . data )
132- const delta = parsed . choices ?. [ 0 ] ?. delta
133- const content = delta ?. content
134- const reasoning_content =
135- delta ?. reasoning_content ||
136- delta ?. reasoning ||
137- parsed . reasoning_content ||
138- parsed ?. reasoning
139-
140- if ( content ) {
141- fullResponse += content
142- event . sender . send ( 'ai-stream-data' , {
143- requestId : request . requestId ,
144- type : 'chunk' ,
145- content : content
146- } as AIStreamChunk )
147- }
148-
149- if ( reasoning_content ) {
150- fullReasoning += reasoning_content
151- event . sender . send ( 'ai-stream-data' , {
152- requestId : request . requestId ,
153- type : 'reasoning_content' ,
154- reasoning_content : reasoning_content
155- } as AIStreamChunk )
156- }
157- } catch ( e ) {
158- // 忽略解析错误,继续处理下一行
159- console . warn ( 'Failed to parse streaming data:' , e )
160- }
161- }
162- } )
163-
164- try {
165- while ( true ) {
166- const { done, value } = await reader . read ( )
245+ const fullResponse = { value : '' }
246+ const fullReasoning = { value : '' }
167247
168- if ( done ) break
169-
170- const chunk = decoder . decode ( value , { stream : true } )
171- parser . feed ( chunk )
172- }
248+ const parser = this . createStreamParser (
249+ event ,
250+ request . requestId ,
251+ fullResponse ,
252+ fullReasoning
253+ )
173254
174- event . sender . send ( 'ai-stream-data' , {
175- requestId : request . requestId ,
176- type : 'complete' ,
177- content : fullResponse ,
178- reasoning_content : fullReasoning || undefined
179- } as AIStreamChunk )
180- } catch ( error ) {
181- if ( error instanceof Error && error . name === 'AbortError' ) {
182- // 请求被中断
183- event . sender . send ( 'ai-stream-data' , {
184- requestId : request . requestId ,
185- type : 'complete' ,
186- content : fullResponse ,
187- reasoning_content : fullReasoning || undefined
188- } as AIStreamChunk )
189- } else {
190- event . sender . send ( 'ai-stream-data' , {
191- requestId : request . requestId ,
192- type : 'error' ,
193- error : error instanceof Error ? error . message : 'Unknown error'
194- } as AIStreamChunk )
195- }
196- } finally {
197- this . abortControllers . delete ( request . requestId )
198- }
255+ await this . processStreamResponse (
256+ reader ,
257+ parser ,
258+ event ,
259+ request . requestId ,
260+ fullResponse ,
261+ fullReasoning
262+ )
199263 } catch ( error ) {
200264 if ( error instanceof Error && error . name === 'AbortError' ) {
201- // 请求被中断,发送当前内容作为完成状态
202- event . sender . send ( 'ai-stream-data' , {
203- requestId : request . requestId ,
204- type : 'complete' ,
205- content : '' ,
206- reasoning_content : undefined
207- } as AIStreamChunk )
265+ this . sendComplete ( event , request . requestId , '' , undefined )
208266 } else {
209- event . sender . send ( 'ai-stream-data' , {
210- requestId : request . requestId ,
211- type : 'error' ,
212- error : error instanceof Error ? error . message : 'Unknown error'
213- } as AIStreamChunk )
267+ this . sendError (
268+ event ,
269+ request . requestId ,
270+ error instanceof Error ? error . message : 'Unknown error'
271+ )
214272 }
215273 } finally {
216274 this . abortControllers . delete ( request . requestId )
0 commit comments