@@ -1,5 +1,7 @@
 import { describe, expect, test } from "bun:test"
+import { LLMClient, RequestExecutor } from "@opencode-ai/llm/route"
 import { jsonSchema, tool, type ModelMessage } from "ai"
+import { Effect } from "effect"
 import { LLMNative } from "@/session/llm-native"
 import type { Provider } from "@/provider/provider"
 import { ModelID, ProviderID } from "@/provider/schema"
@@ -231,4 +233,32 @@ describe("session.llm-native.request", () => {
       }),
     ).toThrow("Native LLM request adapter does not support provider package unknown-provider")
   })
+
+  test("compiles through the native OpenAI Responses route", async () => {
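+    // Build a native request and prepare it with the client and default executor layers provided.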
+    const prepared = await Effect.runPromise(
+      LLMClient.prepare(
+        LLMNative.request({
+          model: baseModel,
+          messages: [{ role: "user", content: "hello" }],
+          providerOptions: { openai: { store: false } },
+          maxOutputTokens: 512,
+          headers: { "x-request": "request-header" },
+        }),
+      ).pipe(Effect.provide(LLMClient.layer), Effect.provide(RequestExecutor.defaultLayer)),
+    )
+
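+    // The prepared request should target the OpenAI Responses route and carry the compiled Responses body.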
+    expect(prepared).toMatchObject({
+      route: "openai-responses",
+      protocol: "openai-responses",
+      body: {
+        model: "gpt-5-mini",
+        input: [{ role: "user", content: [{ type: "input_text", text: "hello" }] }],
+        max_output_tokens: 512,
+        store: false,
+        stream: true,
+      },
+    })
+  })
 })