Skip to content

Commit d47fd03

Browse files
committed
feat: 支持 openai provider 通过 OPENAI_WIRE_API 切换到 responses API
1 parent d09f363 commit d47fd03

5 files changed

Lines changed: 1008 additions & 19 deletions

File tree

Lines changed: 242 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,242 @@
1+
import { afterEach, describe, expect, test } from 'bun:test'
2+
import type { ResponseStreamEvent } from 'openai/resources/responses/responses.mjs'
3+
import {
4+
adaptResponsesStreamToAnthropic,
5+
buildOpenAIResponsesRequestBody,
6+
resolveOpenAIWireAPI,
7+
} from '../responses.js'
8+
9+
const originalWireAPI = process.env.OPENAI_WIRE_API
10+
11+
afterEach(() => {
12+
if (originalWireAPI === undefined) {
13+
delete process.env.OPENAI_WIRE_API
14+
} else {
15+
process.env.OPENAI_WIRE_API = originalWireAPI
16+
}
17+
})
18+
19+
async function collectAdaptedEvents(events: ResponseStreamEvent[]) {
20+
async function* stream() {
21+
for (const event of events) {
22+
yield event
23+
}
24+
}
25+
26+
const result = []
27+
for await (const event of adaptResponsesStreamToAnthropic(
28+
stream() as any,
29+
'test-model',
30+
)) {
31+
result.push(event)
32+
}
33+
return result
34+
}
35+
36+
describe('resolveOpenAIWireAPI', () => {
37+
test('defaults to chat completions', () => {
38+
delete process.env.OPENAI_WIRE_API
39+
expect(resolveOpenAIWireAPI()).toBe('chat_completions')
40+
})
41+
42+
test('accepts responses env override', () => {
43+
process.env.OPENAI_WIRE_API = 'responses'
44+
expect(resolveOpenAIWireAPI()).toBe('responses')
45+
})
46+
})
47+
48+
describe('buildOpenAIResponsesRequestBody', () => {
49+
test('converts messages, tools, and tool choice', () => {
50+
const body = buildOpenAIResponsesRequestBody({
51+
model: 'gpt-test',
52+
messages: [
53+
{
54+
type: 'user',
55+
message: { content: 'hello' },
56+
},
57+
{
58+
type: 'assistant',
59+
message: {
60+
content: [
61+
{
62+
type: 'tool_use',
63+
id: 'toolu_123',
64+
name: 'bash',
65+
input: { command: 'ls' },
66+
},
67+
],
68+
},
69+
},
70+
{
71+
type: 'user',
72+
message: {
73+
content: [
74+
{
75+
type: 'tool_result',
76+
tool_use_id: 'toolu_123',
77+
content: 'ok',
78+
},
79+
{
80+
type: 'text',
81+
text: 'next',
82+
},
83+
],
84+
},
85+
},
86+
] as any,
87+
systemPrompt: ['system prompt'] as any,
88+
tools: [
89+
{
90+
type: 'custom',
91+
name: 'bash',
92+
description: 'Run shell commands',
93+
input_schema: {
94+
type: 'object',
95+
properties: {
96+
command: { const: 'ls' },
97+
},
98+
},
99+
strict: true,
100+
},
101+
] as any,
102+
toolChoice: { type: 'tool', name: 'bash' },
103+
enableThinking: false,
104+
maxTokens: 4096,
105+
temperatureOverride: 0.2,
106+
})
107+
108+
expect(body.instructions).toBe('system prompt')
109+
expect(body.max_output_tokens).toBe(4096)
110+
expect(body.tool_choice).toEqual({ type: 'function', name: 'bash' })
111+
expect(body.tools).toEqual([
112+
{
113+
type: 'function',
114+
name: 'bash',
115+
description: 'Run shell commands',
116+
parameters: {
117+
type: 'object',
118+
properties: {
119+
command: { enum: ['ls'] },
120+
},
121+
},
122+
strict: true,
123+
},
124+
])
125+
expect(body.input).toEqual([
126+
{
127+
type: 'message',
128+
role: 'user',
129+
content: [{ type: 'input_text', text: 'hello' }],
130+
},
131+
{
132+
type: 'function_call',
133+
call_id: 'toolu_123',
134+
name: 'bash',
135+
arguments: '{"command":"ls"}',
136+
},
137+
{
138+
type: 'function_call_output',
139+
call_id: 'toolu_123',
140+
output: 'ok',
141+
},
142+
{
143+
type: 'message',
144+
role: 'user',
145+
content: [{ type: 'input_text', text: 'next' }],
146+
},
147+
])
148+
})
149+
})
150+
151+
describe('adaptResponsesStreamToAnthropic', () => {
152+
test('maps streamed function calls and terminal usage', async () => {
153+
const events = await collectAdaptedEvents([
154+
{
155+
type: 'response.created',
156+
sequence_number: 1,
157+
response: {
158+
id: 'resp_1',
159+
object: 'response',
160+
created_at: 0,
161+
model: 'test-model',
162+
output: [],
163+
output_text: '',
164+
tools: [],
165+
tool_choice: 'auto',
166+
parallel_tool_calls: false,
167+
temperature: null,
168+
top_p: null,
169+
error: null,
170+
incomplete_details: null,
171+
instructions: null,
172+
metadata: null,
173+
usage: null,
174+
},
175+
} as any,
176+
{
177+
type: 'response.output_item.added',
178+
sequence_number: 2,
179+
output_index: 0,
180+
item: {
181+
type: 'function_call',
182+
id: 'fc_1',
183+
call_id: 'toolu_123',
184+
name: 'bash',
185+
arguments: '',
186+
status: 'in_progress',
187+
},
188+
} as any,
189+
{
190+
type: 'response.function_call_arguments.delta',
191+
sequence_number: 3,
192+
output_index: 0,
193+
item_id: 'fc_1',
194+
delta: '{"command":"ls"}',
195+
} as any,
196+
{
197+
type: 'response.completed',
198+
sequence_number: 4,
199+
response: {
200+
usage: {
201+
input_tokens: 11,
202+
output_tokens: 7,
203+
total_tokens: 18,
204+
input_tokens_details: { cached_tokens: 2 },
205+
output_tokens_details: { reasoning_tokens: 0 },
206+
},
207+
},
208+
} as any,
209+
])
210+
211+
expect(events).toEqual([
212+
expect.objectContaining({ type: 'message_start' }),
213+
expect.objectContaining({
214+
type: 'content_block_start',
215+
content_block: expect.objectContaining({
216+
type: 'tool_use',
217+
id: 'toolu_123',
218+
name: 'bash',
219+
}),
220+
}),
221+
expect.objectContaining({
222+
type: 'content_block_delta',
223+
delta: {
224+
type: 'input_json_delta',
225+
partial_json: '{"command":"ls"}',
226+
},
227+
}),
228+
expect.objectContaining({ type: 'content_block_stop' }),
229+
expect.objectContaining({
230+
type: 'message_delta',
231+
delta: { stop_reason: 'tool_use', stop_sequence: null },
232+
usage: {
233+
input_tokens: 11,
234+
output_tokens: 7,
235+
cache_creation_input_tokens: 0,
236+
cache_read_input_tokens: 2,
237+
},
238+
}),
239+
expect.objectContaining({ type: 'message_stop' }),
240+
])
241+
})
242+
})

src/services/api/openai/index.ts

Lines changed: 37 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,11 @@ import { logForDebugging } from '../../../utils/debug.js'
2424
import { addToTotalSessionCost } from '../../../cost-tracker.js'
2525
import { calculateUSDCost } from '../../../utils/modelCost.js'
2626
import { isOpenAIThinkingEnabled, resolveOpenAIMaxTokens, buildOpenAIRequestBody } from './requestBody.js'
27+
import {
28+
adaptResponsesStreamToAnthropic,
29+
buildOpenAIResponsesRequestBody,
30+
resolveOpenAIWireAPI,
31+
} from './responses.js'
2732
import { recordLLMObservation } from '../../../services/langfuse/tracing.js'
2833
import { convertMessagesToLangfuse, convertOutputToLangfuse, convertToolsToLangfuse } from '../../../services/langfuse/convert.js'
2934
export { isOpenAIThinkingEnabled, resolveOpenAIMaxTokens, buildOpenAIRequestBody }
@@ -223,29 +228,45 @@ export async function* queryModelOpenAI(
223228
source: options.querySource,
224229
})
225230

231+
const wireAPI = resolveOpenAIWireAPI()
226232
logForDebugging(
227-
`[OpenAI] Calling model=${openaiModel}, messages=${openaiMessages.length}, tools=${openaiTools.length}, thinking=${enableThinking}`,
233+
`[OpenAI] Calling model=${openaiModel}, wire_api=${wireAPI}, messages=${openaiMessages.length}, tools=${openaiTools.length}, thinking=${enableThinking}`,
228234
)
229235

230236
// 12. Call OpenAI API with streaming
231-
const requestBody = buildOpenAIRequestBody({
232-
model: openaiModel,
233-
messages: openaiMessages,
234-
tools: openaiTools,
235-
toolChoice: openaiToolChoice,
236-
enableThinking,
237-
maxTokens,
238-
temperatureOverride: options.temperatureOverride,
239-
})
240-
const stream = await client.chat.completions.create(
241-
requestBody,
242-
{ signal },
243-
)
237+
let adaptedStream: AsyncIterable<any>
238+
if (wireAPI === 'responses') {
239+
const requestBody = buildOpenAIResponsesRequestBody({
240+
model: openaiModel,
241+
messages: messagesForAPI,
242+
systemPrompt,
243+
tools: standardTools,
244+
toolChoice: options.toolChoice,
245+
enableThinking,
246+
maxTokens,
247+
temperatureOverride: options.temperatureOverride,
248+
})
249+
const stream = await client.responses.create(requestBody, { signal })
250+
adaptedStream = adaptResponsesStreamToAnthropic(stream, openaiModel)
251+
} else {
252+
const requestBody = buildOpenAIRequestBody({
253+
model: openaiModel,
254+
messages: openaiMessages,
255+
tools: openaiTools,
256+
toolChoice: openaiToolChoice,
257+
enableThinking,
258+
maxTokens,
259+
temperatureOverride: options.temperatureOverride,
260+
})
261+
const stream = await client.chat.completions.create(
262+
requestBody,
263+
{ signal },
264+
)
265+
adaptedStream = adaptOpenAIStreamToAnthropic(stream, openaiModel)
266+
}
244267

245268
// 12. Convert OpenAI stream to Anthropic events, then process into
246269
// AssistantMessage + StreamEvent (matching the Anthropic path behavior)
247-
const adaptedStream = adaptOpenAIStreamToAnthropic(stream, openaiModel)
248-
249270
// Accumulate content blocks and usage, same as the Anthropic path in claude.ts
250271
const contentBlocks: Record<number, any> = {}
251272
const collectedMessages: AssistantMessage[] = []

0 commit comments

Comments (0)