@@ -209,6 +209,92 @@ describe("Together AI compatibility", () => {
209209 } ) ;
210210} ) ;
211211
describe("OpenAI-compatible path prefix normalization", () => {
  // Each case sends a request to a non-standard path and expects the server
  // to normalize it onto the canonical /v1/... route served by the
  // catch-all fixtures.

  it("normalizes /v4/chat/completions to /v1/chat/completions", async () => {
    instance = await createServer(CATCH_ALL_FIXTURES);

    const payload = {
      model: "bigmodel-4",
      stream: false,
      messages: [{ role: "user", content: "hello" }],
    };
    const { status, body } = await httpPost(`${instance.url}/v4/chat/completions`, payload);

    expect(status).toBe(200);
    const json = JSON.parse(body);
    expect(json.choices).toBeDefined();
    expect(json.choices[0].message.content).toBe("Hello from aimock!");
    expect(json.object).toBe("chat.completion");
  });

  it("normalizes /api/coding/paas/v4/chat/completions to /v1/chat/completions", async () => {
    instance = await createServer(CATCH_ALL_FIXTURES);

    const payload = {
      model: "bigmodel-4",
      stream: false,
      messages: [{ role: "user", content: "hello" }],
    };
    const { status, body } = await httpPost(
      `${instance.url}/api/coding/paas/v4/chat/completions`,
      payload,
    );

    expect(status).toBe(200);
    const json = JSON.parse(body);
    expect(json.choices).toBeDefined();
    expect(json.choices[0].message.content).toBe("Hello from aimock!");
    expect(json.object).toBe("chat.completion");
  });

  it("still handles standard /v1/chat/completions (regression)", async () => {
    instance = await createServer(CATCH_ALL_FIXTURES);

    const payload = {
      model: "gpt-4o",
      stream: false,
      messages: [{ role: "user", content: "hello" }],
    };
    const { status, body } = await httpPost(`${instance.url}/v1/chat/completions`, payload);

    expect(status).toBe(200);
    const json = JSON.parse(body);
    expect(json.choices).toBeDefined();
    expect(json.choices[0].message.content).toBe("Hello from aimock!");
    expect(json.object).toBe("chat.completion");
  });

  it("normalizes /custom/embeddings to /v1/embeddings", async () => {
    instance = await createServer(CATCH_ALL_FIXTURES);

    const { status, body } = await httpPost(`${instance.url}/custom/embeddings`, {
      model: "text-embedding-3-small",
      input: "test embedding via custom prefix",
    });

    expect(status).toBe(200);
    const json = JSON.parse(body);
    expect(json.object).toBe("list");
    expect(json.data[0].embedding).toBeInstanceOf(Array);
  });

  it("combines /openai/ prefix strip with path normalization", async () => {
    instance = await createServer(CATCH_ALL_FIXTURES);

    // /openai/v1/chat/completions is the Groq-style path — the /openai/ strip
    // should still work alongside the new normalization logic
    const { status, body } = await httpPost(
      `${instance.url}/openai/v1/chat/completions`,
      {
        model: "llama-3.3-70b-versatile",
        stream: false,
        messages: [{ role: "user", content: "hello" }],
      },
      { Authorization: "Bearer mock-groq-key" },
    );

    expect(status).toBe(200);
    const json = JSON.parse(body);
    expect(json.choices).toBeDefined();
    expect(json.choices[0].message.content).toBe("Hello from aimock!");
    expect(json.object).toBe("chat.completion");
  });
});
297+
212298describe ( "vLLM compatibility" , ( ) => {
213299 // vLLM uses standard /v1/chat/completions with custom model names
214300 it ( "handles vLLM-style request via /v1/chat/completions" , async ( ) => {
0 commit comments