@@ -1,4 +1,6 @@
 import { describe, it, expect, afterEach } from "vitest";
+import http from "node:http";
+import crypto from "node:crypto";
 import { createServer, type ServerInstance } from "../server.js";
 import type { Fixture } from "../types.js";
 
@@ -209,6 +211,248 @@ describe("Together AI compatibility", () => {
   });
 });
 
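+// The tests below exercise suffix-based path rewriting. A minimal sketch of the
+// assumed rewrite, under the /v2/ and /openai/ rules tested here (the real logic
+// lives in server.ts; every name in this sketch is illustrative, not the actual API):
+//
+//   const OPENAI_SUFFIXES = [
+//     "/chat/completions", "/embeddings", "/responses",
+//     "/audio/speech", "/audio/transcriptions", "/images/generations",
+//   ];
+//   function normalizePath(path: string): string {
+//     if (path.startsWith("/v2/")) return path; // Cohere-style paths are exempt
+//     const suffix = OPENAI_SUFFIXES.find((s) => path.endsWith(s));
+//     return suffix ? `/v1${suffix}` : path; // no match: falls through to 404
+//   }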
+describe("OpenAI-compatible path prefix normalization", () => {
+  it("normalizes /v4/chat/completions to /v1/chat/completions", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { status, body } = await httpPost(`${instance.url}/v4/chat/completions`, {
+      model: "bigmodel-4",
+      stream: false,
+      messages: [{ role: "user", content: "hello" }],
+    });
+
+    expect(status).toBe(200);
+    const parsed = JSON.parse(body);
+    expect(parsed.choices).toBeDefined();
+    expect(parsed.choices[0].message.content).toBe("Hello from aimock!");
+    expect(parsed.object).toBe("chat.completion");
+  });
+
+  it("normalizes /api/coding/paas/v4/chat/completions to /v1/chat/completions", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { status, body } = await httpPost(`${instance.url}/api/coding/paas/v4/chat/completions`, {
+      model: "bigmodel-4",
+      stream: false,
+      messages: [{ role: "user", content: "hello" }],
+    });
+
+    expect(status).toBe(200);
+    const parsed = JSON.parse(body);
+    expect(parsed.choices).toBeDefined();
+    expect(parsed.choices[0].message.content).toBe("Hello from aimock!");
+    expect(parsed.object).toBe("chat.completion");
+  });
+
+  it("still handles standard /v1/chat/completions (regression)", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { status, body } = await httpPost(`${instance.url}/v1/chat/completions`, {
+      model: "gpt-4o",
+      stream: false,
+      messages: [{ role: "user", content: "hello" }],
+    });
+
+    expect(status).toBe(200);
+    const parsed = JSON.parse(body);
+    expect(parsed.choices).toBeDefined();
+    expect(parsed.choices[0].message.content).toBe("Hello from aimock!");
+    expect(parsed.object).toBe("chat.completion");
+  });
+
+  it("normalizes /custom/embeddings to /v1/embeddings", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { status, body } = await httpPost(`${instance.url}/custom/embeddings`, {
+      model: "text-embedding-3-small",
+      input: "test embedding via custom prefix",
+    });
+
+    expect(status).toBe(200);
+    const parsed = JSON.parse(body);
+    expect(parsed.object).toBe("list");
+    expect(parsed.data[0].embedding).toBeInstanceOf(Array);
+  });
+
+  it("combines /openai/ prefix strip with normalization for non-v1 paths", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    // /openai/v4/chat/completions: strip /openai/, then normalize /v4/ to /v1/
+    const { status, body } = await httpPost(
+      `${instance.url}/openai/v4/chat/completions`,
+      {
+        model: "llama-3.3-70b-versatile",
+        stream: false,
+        messages: [{ role: "user", content: "hello" }],
+      },
+      { Authorization: "Bearer mock-groq-key" },
+    );
+
+    expect(status).toBe(200);
+    const parsed = JSON.parse(body);
+    expect(parsed.choices).toBeDefined();
+    expect(parsed.choices[0].message.content).toBe("Hello from aimock!");
+  });
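+
+  // Illustrative trace of the assumed two-step rewrite for the case above:
+  //   "/openai/v4/chat/completions" --strip /openai/--> "/v4/chat/completions"
+  //                                 --normalize-------> "/v1/chat/completions"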
+
+  it("normalizes /custom/responses to /v1/responses", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { body } = await httpPost(`${instance.url}/custom/responses`, {
+      model: "gpt-4o",
+      input: "hello",
+      stream: false,
+    });
+
+    // Normalization works: we get "No fixture matched" from the Responses handler
+    // (not "Not found", which would mean the path wasn't routed at all).
+    const parsed = JSON.parse(body);
+    expect(parsed.error.type).toBe("invalid_request_error");
+    expect(parsed.error.code).toBe("no_fixture_match");
+  });
+
+  it("normalizes /custom/audio/speech to /v1/audio/speech", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { body } = await httpPost(`${instance.url}/custom/audio/speech`, {
+      model: "tts-1",
+      input: "test speech",
+      voice: "alloy",
+    });
+
+    // Normalization works: handler reached (not "Not found").
+    const parsed = JSON.parse(body);
+    expect(parsed.error.type).toBe("invalid_request_error");
+  });
+
+  it("normalizes /custom/audio/transcriptions to /v1/audio/transcriptions", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { body } = await httpPost(`${instance.url}/custom/audio/transcriptions`, {
+      model: "whisper-1",
+      file: "test",
+    });
+
+    // Normalization works: handler reached (not "Not found").
+    const parsed = JSON.parse(body);
+    expect(parsed.error.type).toBe("invalid_request_error");
+  });
+
+  it("normalizes /custom/images/generations to /v1/images/generations", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { body } = await httpPost(`${instance.url}/custom/images/generations`, {
+      model: "dall-e-3",
+      prompt: "test",
+    });
+
+    // Normalization works: handler reached (not "Not found").
+    const parsed = JSON.parse(body);
+    expect(parsed.error.type).toBe("invalid_request_error");
+  });
+
+  it("does NOT normalize /v2/chat/completions (/v2/ guard for Cohere convention)", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { status } = await httpPost(`${instance.url}/v2/chat/completions`, {
+      model: "command-r-plus",
+      stream: false,
+      messages: [{ role: "user", content: "hello" }],
+    });
+
+    // /v2/chat/completions should NOT be rewritten to /v1/chat/completions:
+    // the /v2/ guard prevents normalization, so this falls through to 404.
+    expect(status).toBe(404);
+  });
+
+  it("routes /v2/chat to Cohere handler (not normalization concern)", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    // /v2/chat is Cohere's endpoint and reaches the Cohere handler directly.
+    const { status } = await httpPost(`${instance.url}/v2/chat`, {
+      model: "command-r-plus",
+      stream: false,
+      messages: [{ role: "user", content: "hello" }],
+    });
+
+    expect(status).toBe(200);
+  });
+
+  it("returns 404 for unrecognized paths that don't match any suffix", async () => {
+    instance = await createServer(CATCH_ALL_FIXTURES);
+
+    const { status } = await httpPost(`${instance.url}/custom/foo/bar`, {
+      model: "test",
+      messages: [{ role: "user", content: "hello" }],
+    });
+
+    expect(status).toBe(404);
+  });
+});
+
+describe("WebSocket path normalization", () => {
+  /**
+   * Send an HTTP upgrade request and return the resulting status code.
+   * 101 = upgrade succeeded (WebSocket); anything else = rejected.
+   */
+  function wsUpgrade(url: string, path: string): Promise<{ statusCode: number }> {
+    return new Promise((resolve, reject) => {
+      const parsed = new URL(url);
+      const req = http.request({
+        hostname: parsed.hostname,
+        port: parsed.port,
+        path,
+        headers: {
+          Connection: "Upgrade",
+          Upgrade: "websocket",
+          "Sec-WebSocket-Key": crypto.randomBytes(16).toString("base64"),
+          "Sec-WebSocket-Version": "13",
+        },
+      });
+      req.on("upgrade", (_res, socket) => {
+        socket.destroy();
+        resolve({ statusCode: 101 });
+      });
+      req.on("response", (res) => {
+        resolve({ statusCode: res.statusCode ?? 0 });
+      });
+      req.on("error", reject);
+      req.end();
+    });
+  }
+
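+  // Sketch of how the server presumably applies the same rewrite on upgrade
+  // (assumption only; stripOpenAIPrefix / normalizePath / rejectUpgrade are
+  // hypothetical names, not server.ts exports):
+  //
+  //   server.on("upgrade", (req, socket, head) => {
+  //     const path = normalizePath(stripOpenAIPrefix(req.url ?? ""));
+  //     if (path !== "/v1/responses") return rejectUpgrade(socket, 404);
+  //     wss.handleUpgrade(req, socket, head, (ws) => wss.emit("connection", ws, req));
+  //   });
+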
| 423 | + it("WS upgrade to /custom/responses normalizes to /v1/responses", async () => { |
| 424 | + instance = await createServer(CATCH_ALL_FIXTURES); |
| 425 | + const { statusCode } = await wsUpgrade(instance.url, "/custom/responses"); |
| 426 | + expect(statusCode).toBe(101); |
| 427 | + }); |
| 428 | + |
| 429 | + it("WS upgrade to /openai/v1/responses works (/openai/ strip)", async () => { |
| 430 | + instance = await createServer(CATCH_ALL_FIXTURES); |
| 431 | + const { statusCode } = await wsUpgrade(instance.url, "/openai/v1/responses"); |
| 432 | + expect(statusCode).toBe(101); |
| 433 | + }); |
| 434 | + |
| 435 | + it("WS upgrade to /v2/responses is NOT normalized (returns 404)", async () => { |
| 436 | + instance = await createServer(CATCH_ALL_FIXTURES); |
| 437 | + const { statusCode } = await wsUpgrade(instance.url, "/v2/responses"); |
| 438 | + expect(statusCode).toBe(404); |
| 439 | + }); |
| 440 | + |
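+  // Assumed exemption, illustrative only: the /openai/ strip presumably skips
+  // Azure-style paths, e.g. `if (path.startsWith("/openai/deployments/")) return path;`.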
| 441 | + it("WS upgrade to Azure deployment path is NOT normalized", async () => { |
| 442 | + instance = await createServer(CATCH_ALL_FIXTURES); |
| 443 | + |
| 444 | + // Azure deployment WebSocket path should NOT have /openai/ stripped |
| 445 | + // or be normalized — it should 404 cleanly (Azure WS not supported) |
| 446 | + const { statusCode } = await wsUpgrade( |
| 447 | + instance.url, |
| 448 | + "/openai/deployments/gpt-4o/chat/completions", |
| 449 | + ); |
| 450 | + |
| 451 | + // Not upgraded (Azure deployment paths don't support WS) |
| 452 | + expect(statusCode).toBe(404); |
| 453 | + }); |
| 454 | +}); |
| 455 | + |
212 | 456 | describe("vLLM compatibility", () => { |
213 | 457 | // vLLM uses standard /v1/chat/completions with custom model names |
214 | 458 | it("handles vLLM-style request via /v1/chat/completions", async () => { |