@@ -79,6 +79,210 @@ test('generateAnswersWithChatgptApiCompat sends expected request and aggregates
7979 assert . deepEqual ( session . conversationRecords . at ( - 1 ) , { question : 'CurrentQ' , answer : 'Hello' } )
8080} )
8181
// For the 'openai' provider with a gpt-5-family model, the configured response
// token limit must be sent as `max_completion_tokens`, never `max_tokens`.
test('generateAnswersWithChatgptApiCompat uses max_completion_tokens for OpenAI gpt-5 models', async (t) => {
  t.mock.method(console, 'debug', () => {})
  setStorage({
    maxConversationContextLength: 3,
    maxResponseTokenLength: 321,
    temperature: 0.2,
  })

  const port = createFakePort()
  const session = {
    modelName: 'chatgptApi-gpt-5.1-chat-latest',
    conversationRecords: [],
    isRetry: false,
  }

  // Record the fetch init object so the outgoing request body can be inspected.
  let recordedInit
  t.mock.method(globalThis, 'fetch', async (_url, init) => {
    recordedInit = init
    return createMockSseResponse([
      'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n',
    ])
  })

  await generateAnswersWithChatgptApiCompat(
    'https://api.example.com/v1',
    port,
    'CurrentQ',
    session,
    'sk-test',
    {},
    'openai',
  )

  const requestBody = JSON.parse(recordedInit.body)
  assert.equal(requestBody.max_completion_tokens, 321)
  assert.equal(Object.hasOwn(requestBody, 'max_tokens'), false)
})
119+
// For the default (compat) provider the token limit goes out as `max_tokens`;
// a stray `max_completion_tokens` in extraBody must be stripped while other
// extraBody keys (e.g. top_p) pass through untouched.
test('generateAnswersWithChatgptApiCompat removes conflicting token key from extraBody', async (t) => {
  t.mock.method(console, 'debug', () => {})
  setStorage({
    maxConversationContextLength: 3,
    maxResponseTokenLength: 222,
    temperature: 0.2,
  })

  const port = createFakePort()
  const session = {
    modelName: 'chatgptApi4oMini',
    conversationRecords: [],
    isRetry: false,
  }

  // Record the fetch init object so the outgoing request body can be inspected.
  let recordedInit
  t.mock.method(globalThis, 'fetch', async (_url, init) => {
    recordedInit = init
    return createMockSseResponse([
      'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n',
    ])
  })

  await generateAnswersWithChatgptApiCompat(
    'https://api.example.com/v1',
    port,
    'CurrentQ',
    session,
    'sk-test',
    {
      max_completion_tokens: 999,
      top_p: 0.9,
    },
  )

  const requestBody = JSON.parse(recordedInit.body)
  assert.equal(requestBody.max_tokens, 222)
  assert.equal(Object.hasOwn(requestBody, 'max_completion_tokens'), false)
  assert.equal(requestBody.top_p, 0.9)
})
160+
// Mirror case of the compat test above: on the 'openai' provider with a gpt-5
// model, a stray `max_tokens` in extraBody must be dropped in favor of the
// configured `max_completion_tokens`, leaving other keys intact.
test('generateAnswersWithChatgptApiCompat removes max_tokens from extraBody for OpenAI gpt-5 models', async (t) => {
  t.mock.method(console, 'debug', () => {})
  setStorage({
    maxConversationContextLength: 3,
    maxResponseTokenLength: 500,
    temperature: 0.2,
  })

  const port = createFakePort()
  const session = {
    modelName: 'chatgptApi-gpt-5.1-chat-latest',
    conversationRecords: [],
    isRetry: false,
  }

  // Record the fetch init object so the outgoing request body can be inspected.
  let recordedInit
  t.mock.method(globalThis, 'fetch', async (_url, init) => {
    recordedInit = init
    return createMockSseResponse([
      'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n',
    ])
  })

  await generateAnswersWithChatgptApiCompat(
    'https://api.example.com/v1',
    port,
    'CurrentQ',
    session,
    'sk-test',
    {
      max_tokens: 999,
      top_p: 0.8,
    },
    'openai',
  )

  const requestBody = JSON.parse(recordedInit.body)
  assert.equal(requestBody.max_completion_tokens, 500)
  assert.equal(Object.hasOwn(requestBody, 'max_tokens'), false)
  assert.equal(requestBody.top_p, 0.8)
})
202+
// When extraBody supplies the token key the provider actually uses
// (`max_tokens` on the compat path), that value wins over the stored setting.
test('generateAnswersWithChatgptApiCompat allows max_tokens override for compat provider', async (t) => {
  t.mock.method(console, 'debug', () => {})
  setStorage({
    maxConversationContextLength: 3,
    maxResponseTokenLength: 400,
    temperature: 0.2,
  })

  const port = createFakePort()
  const session = {
    modelName: 'chatgptApi4oMini',
    conversationRecords: [],
    isRetry: false,
  }

  // Record the fetch init object so the outgoing request body can be inspected.
  let recordedInit
  t.mock.method(globalThis, 'fetch', async (_url, init) => {
    recordedInit = init
    return createMockSseResponse([
      'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n',
    ])
  })

  await generateAnswersWithChatgptApiCompat(
    'https://api.example.com/v1',
    port,
    'CurrentQ',
    session,
    'sk-test',
    {
      max_tokens: 333,
      top_p: 0.75,
    },
  )

  const requestBody = JSON.parse(recordedInit.body)
  assert.equal(requestBody.max_tokens, 333)
  assert.equal(Object.hasOwn(requestBody, 'max_completion_tokens'), false)
  assert.equal(requestBody.top_p, 0.75)
})
243+
// Counterpart of the compat-override test: on the 'openai' provider with a
// gpt-5 model, an explicit `max_completion_tokens` in extraBody takes
// precedence over the stored response token limit.
test('generateAnswersWithChatgptApiCompat allows max_completion_tokens override for OpenAI gpt-5 models', async (t) => {
  t.mock.method(console, 'debug', () => {})
  setStorage({
    maxConversationContextLength: 3,
    maxResponseTokenLength: 400,
    temperature: 0.2,
  })

  const port = createFakePort()
  const session = {
    modelName: 'chatgptApi-gpt-5.1-chat-latest',
    conversationRecords: [],
    isRetry: false,
  }

  // Record the fetch init object so the outgoing request body can be inspected.
  let recordedInit
  t.mock.method(globalThis, 'fetch', async (_url, init) => {
    recordedInit = init
    return createMockSseResponse([
      'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n',
    ])
  })

  await generateAnswersWithChatgptApiCompat(
    'https://api.example.com/v1',
    port,
    'CurrentQ',
    session,
    'sk-test',
    {
      max_completion_tokens: 333,
      top_p: 0.65,
    },
    'openai',
  )

  const requestBody = JSON.parse(recordedInit.body)
  assert.equal(requestBody.max_completion_tokens, 333)
  assert.equal(Object.hasOwn(requestBody, 'max_tokens'), false)
  assert.equal(requestBody.top_p, 0.65)
})
285+
82286test ( 'generateAnswersWithChatgptApiCompat throws on non-ok response with JSON error body' , async ( t ) => {
83287 t . mock . method ( console , 'debug' , ( ) => { } )
84288 setStorage ( {
0 commit comments