diff --git a/src/services/api/openai/__tests__/responses.test.ts b/src/services/api/openai/__tests__/responses.test.ts
new file mode 100644
index 000000000..3e37cb75f
--- /dev/null
+++ b/src/services/api/openai/__tests__/responses.test.ts
@@ -0,0 +1,242 @@
+import { afterEach, describe, expect, test } from 'bun:test'
+import type { ResponseStreamEvent } from 'openai/resources/responses/responses.mjs'
+import {
+  adaptResponsesStreamToAnthropic,
+  buildOpenAIResponsesRequestBody,
+  resolveOpenAIWireAPI,
+} from '../responses.js'
+
+const originalWireAPI = process.env.OPENAI_WIRE_API
+
+afterEach(() => {
+  if (originalWireAPI === undefined) {
+    delete process.env.OPENAI_WIRE_API
+  } else {
+    process.env.OPENAI_WIRE_API = originalWireAPI
+  }
+})
+
+async function collectAdaptedEvents(events: ResponseStreamEvent[]) {
+  async function* stream() {
+    for (const event of events) {
+      yield event
+    }
+  }
+
+  const result = []
+  for await (const event of adaptResponsesStreamToAnthropic(
+    stream() as any,
+    'test-model',
+  )) {
+    result.push(event)
+  }
+  return result
+}
+
+describe('resolveOpenAIWireAPI', () => {
+  test('defaults to chat completions', () => {
+    delete process.env.OPENAI_WIRE_API
+    expect(resolveOpenAIWireAPI()).toBe('chat_completions')
+  })
+
+  test('accepts responses env override', () => {
+    process.env.OPENAI_WIRE_API = 'responses'
+    expect(resolveOpenAIWireAPI()).toBe('responses')
+  })
+})
+
+describe('buildOpenAIResponsesRequestBody', () => {
+  test('converts messages, tools, and tool choice', () => {
+    const body = buildOpenAIResponsesRequestBody({
+      model: 'gpt-test',
+      messages: [
+        {
+          type: 'user',
+          message: { content: 'hello' },
+        },
+        {
+          type: 'assistant',
+          message: {
+            content: [
+              {
+                type: 'tool_use',
+                id: 'toolu_123',
+                name: 'bash',
+                input: { command: 'ls' },
+              },
+            ],
+          },
+        },
+        {
+          type: 'user',
+          message: {
+            content: [
+              {
+                type: 'tool_result',
+                tool_use_id: 'toolu_123',
+                content: 'ok',
+              },
+              {
+                type: 'text',
+                text: 'next',
+              },
+            ],
+          },
+        },
+      ] as any,
+      systemPrompt: ['system prompt'] as any,
+      tools: [
+        {
+          type: 'custom',
+          name: 'bash',
+          description: 'Run shell commands',
+          input_schema: {
+            type: 'object',
+            properties: {
+              command: { const: 'ls' },
+            },
+          },
+          strict: true,
+        },
+      ] as any,
+      toolChoice: { type: 'tool', name: 'bash' },
+      enableThinking: false,
+      maxTokens: 4096,
+      temperatureOverride: 0.2,
+    })
+
+    expect(body.instructions).toBe('system prompt')
+    expect(body.max_output_tokens).toBe(4096)
+    expect(body.tool_choice).toEqual({ type: 'function', name: 'bash' })
+    expect(body.tools).toEqual([
+      {
+        type: 'function',
+        name: 'bash',
+        description: 'Run shell commands',
+        parameters: {
+          type: 'object',
+          properties: {
+            command: { enum: ['ls'] },
+          },
+        },
+        strict: true,
+      },
+    ])
+    expect(body.input).toEqual([
+      {
+        type: 'message',
+        role: 'user',
+        content: [{ type: 'input_text', text: 'hello' }],
+      },
+      {
+        type: 'function_call',
+        call_id: 'toolu_123',
+        name: 'bash',
+        arguments: '{"command":"ls"}',
+      },
+      {
+        type: 'function_call_output',
+        call_id: 'toolu_123',
+        output: 'ok',
+      },
+      {
+        type: 'message',
+        role: 'user',
+        content: [{ type: 'input_text', text: 'next' }],
+      },
+    ])
+  })
+})
+
+describe('adaptResponsesStreamToAnthropic', () => {
+  test('maps streamed function calls and terminal usage', async () => {
+    const events = await collectAdaptedEvents([
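+      // Simulated Responses stream: created, function_call item added,
+      // arguments delta, then completed with terminal usage.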
+      {
+        type: 'response.created',
+        sequence_number: 1,
+        response: {
+          id: 'resp_1',
+          object: 'response',
+          created_at: 0,
+          model: 'test-model',
+          output: [],
+          output_text: '',
+          tools: [],
+          tool_choice: 'auto',
+          parallel_tool_calls: false,
+          temperature: null,
+          top_p: null,
+          error: null,
+          incomplete_details: null,
+          instructions: null,
+          metadata: null,
+          usage: null,
+        },
+      } as any,
+      {
+        type: 'response.output_item.added',
+        sequence_number: 2,
+        output_index: 0,
+        item: {
+          type: 'function_call',
+          id: 'fc_1',
+          call_id: 'toolu_123',
+          name: 'bash',
+          arguments: '',
+          status: 'in_progress',
+        },
+      } as any,
+      {
+        type: 'response.function_call_arguments.delta',
+        sequence_number: 3,
+        output_index: 0,
+        item_id: 'fc_1',
+        delta: '{"command":"ls"}',
+      } as any,
+      {
+        type: 'response.completed',
+        sequence_number: 4,
+        response: {
+          usage: {
+            input_tokens: 11,
+            output_tokens: 7,
+            total_tokens: 18,
+            input_tokens_details: { cached_tokens: 2 },
+            output_tokens_details: { reasoning_tokens: 0 },
+          },
+        },
+      } as any,
+    ])
+
+    expect(events).toEqual([
+      expect.objectContaining({ type: 'message_start' }),
+      expect.objectContaining({
+        type: 'content_block_start',
+        content_block: expect.objectContaining({
+          type: 'tool_use',
+          id: 'toolu_123',
+          name: 'bash',
+        }),
+      }),
+      expect.objectContaining({
+        type: 'content_block_delta',
+        delta: {
+          type: 'input_json_delta',
+          partial_json: '{"command":"ls"}',
+        },
+      }),
+      expect.objectContaining({ type: 'content_block_stop' }),
+      expect.objectContaining({
+        type: 'message_delta',
+        delta: { stop_reason: 'tool_use', stop_sequence: null },
+        usage: {
+          input_tokens: 11,
+          output_tokens: 7,
+          cache_creation_input_tokens: 0,
+          cache_read_input_tokens: 2,
+        },
+      }),
+      expect.objectContaining({ type: 'message_stop' }),
+    ])
+  })
+})
diff --git a/src/services/api/openai/index.ts b/src/services/api/openai/index.ts
index 248c2dac3..62d3ee33d 100644
--- a/src/services/api/openai/index.ts
+++ b/src/services/api/openai/index.ts
@@ -25,6 +25,11 @@ import { logForDebugging } from '../../../utils/debug.js'
 import { addToTotalSessionCost } from '../../../cost-tracker.js'
 import { calculateUSDCost } from '../../../utils/modelCost.js'
 import { isOpenAIThinkingEnabled, resolveOpenAIMaxTokens, buildOpenAIRequestBody } from './requestBody.js'
+import {
+  adaptResponsesStreamToAnthropic,
+  buildOpenAIResponsesRequestBody,
+  resolveOpenAIWireAPI,
+} from './responses.js'
 import { recordLLMObservation } from '../../../services/langfuse/tracing.js'
 import { convertMessagesToLangfuse, convertOutputToLangfuse, convertToolsToLangfuse } from '../../../services/langfuse/convert.js'
 export { isOpenAIThinkingEnabled, resolveOpenAIMaxTokens, buildOpenAIRequestBody }
@@ -273,29 +278,45 @@ export async function* queryModelOpenAI(
     source: options.querySource,
   })
 
+  const wireAPI = resolveOpenAIWireAPI()
   logForDebugging(
-    `[OpenAI] Calling model=${openaiModel}, messages=${openaiMessages.length}, tools=${openaiTools.length}, thinking=${enableThinking}`,
+    `[OpenAI] Calling model=${openaiModel}, wire_api=${wireAPI}, messages=${openaiMessages.length}, tools=${openaiTools.length}, thinking=${enableThinking}`,
   )
 
   // 12. Call OpenAI API with streaming
-  const requestBody = buildOpenAIRequestBody({
-    model: openaiModel,
-    messages: openaiMessages,
-    tools: openaiTools,
-    toolChoice: openaiToolChoice,
-    enableThinking,
-    maxTokens,
-    temperatureOverride: options.temperatureOverride,
-  })
-  const stream = await client.chat.completions.create(
-    requestBody,
-    { signal },
-  )
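+  // Branch on the wire API: the Responses path builds its own request body
+  // and adapts its event stream; the Chat Completions path is unchanged.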
+  let adaptedStream: AsyncIterable<BetaRawMessageStreamEvent>
+  if (wireAPI === 'responses') {
+    const requestBody = buildOpenAIResponsesRequestBody({
+      model: openaiModel,
+      messages: messagesForAPI,
+      systemPrompt,
+      tools: standardTools,
+      toolChoice: options.toolChoice,
+      enableThinking,
+      maxTokens,
+      temperatureOverride: options.temperatureOverride,
+    })
+    const stream = await client.responses.create(requestBody, { signal })
+    adaptedStream = adaptResponsesStreamToAnthropic(stream, openaiModel)
+  } else {
+    const requestBody = buildOpenAIRequestBody({
+      model: openaiModel,
+      messages: openaiMessages,
+      tools: openaiTools,
+      toolChoice: openaiToolChoice,
+      enableThinking,
+      maxTokens,
+      temperatureOverride: options.temperatureOverride,
+    })
+    const stream = await client.chat.completions.create(
+      requestBody,
+      { signal },
+    )
+    adaptedStream = adaptOpenAIStreamToAnthropic(stream, openaiModel)
+  }
 
   // 12. Convert OpenAI stream to Anthropic events, then process into
   // AssistantMessage + StreamEvent (matching the Anthropic path behavior)
-  const adaptedStream = adaptOpenAIStreamToAnthropic(stream, openaiModel)
-
   // Accumulate content blocks and usage, same as the Anthropic path in claude.ts
   const contentBlocks: Record<number, BetaContentBlock> = {}
   const collectedMessages: AssistantMessage[] = []
diff --git a/src/services/api/openai/responses.ts b/src/services/api/openai/responses.ts
new file mode 100644
index 000000000..0b1e3b491
--- /dev/null
+++ b/src/services/api/openai/responses.ts
@@ -0,0 +1,725 @@
+import type { BetaRawMessageStreamEvent, BetaToolUnion } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
+import type { AssistantMessage, UserMessage } from '../../../types/message.js'
+import type { SystemPrompt } from '../../../utils/systemPromptType.js'
+import type { Stream } from 'openai/streaming.mjs'
+import type {
+  FunctionTool,
+  ResponseCreateParamsStreaming,
+  ResponseInputImage,
+  ResponseInputItem,
+  ResponseInputText,
+  ResponseStreamEvent,
+  ToolChoiceFunction,
+  ToolChoiceOptions,
+} from 'openai/resources/responses/responses.mjs'
+import { randomUUID } from 'crypto'
+
+export type OpenAIWireAPI = 'chat_completions' | 'responses'
+
+export function resolveOpenAIWireAPI(): OpenAIWireAPI {
+  const raw = process.env.OPENAI_WIRE_API?.trim().toLowerCase()
+  if (
+    raw === 'responses' ||
+    raw === 'response'
+  ) {
+    return 'responses'
+  }
+  return 'chat_completions'
+}
+
+export function anthropicMessagesToResponses(
+  messages: (UserMessage | AssistantMessage)[],
+  systemPrompt: SystemPrompt,
+): {
+  instructions: string | undefined
+  input: ResponseInputItem[]
+} {
+  const input: ResponseInputItem[] = []
+
+  for (const message of messages) {
+    if (message.type === 'user') {
+      input.push(...convertUserMessageToResponses(message))
+      continue
+    }
+    if (message.type === 'assistant') {
+      input.push(...convertAssistantMessageToResponses(message))
+    }
+  }
+
+  const instructions = systemPromptToText(systemPrompt) || undefined
+  return { instructions, input }
+}
+
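+// Map Anthropic tool definitions to Responses API function tools. Tools with
+// a server-managed type are dropped; custom and untyped tools pass through
+// with their JSON schema sanitized for the Responses API.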
+export function anthropicToolsToResponses(
+  tools: BetaToolUnion[],
+): FunctionTool[] {
+  return tools
+    .filter(tool => {
+      const toolType = (tool as { type?: string }).type
+      return (
+        tool.type === 'custom' || !('type' in tool) || toolType !== 'server'
+      )
+    })
+    .map(tool => {
+      const anyTool = tool as unknown as Record<string, unknown>
+      return {
+        type: 'function',
+        name: (anyTool.name as string) || '',
+        description: (anyTool.description as string) || null,
+        parameters: sanitizeJsonSchema(
+          (anyTool.input_schema as Record<string, unknown> | undefined) || {
+            type: 'object',
+            properties: {},
+          },
+        ),
+        strict:
+          typeof anyTool.strict === 'boolean' ? anyTool.strict : null,
+        ...(typeof anyTool.defer_loading === 'boolean'
+          ? { defer_loading: anyTool.defer_loading }
+          : {}),
+      } satisfies FunctionTool
+    })
+}
+
+export function anthropicToolChoiceToResponses(
+  toolChoice: unknown,
+): ToolChoiceOptions | ToolChoiceFunction | undefined {
+  if (!toolChoice || typeof toolChoice !== 'object') return undefined
+
+  const tc = toolChoice as Record<string, unknown>
+  const type = tc.type as string
+
+  switch (type) {
+    case 'auto':
+      return 'auto'
+    case 'any':
+      return 'required'
+    case 'tool':
+      return {
+        type: 'function',
+        name: tc.name as string,
+      }
+    default:
+      return undefined
+  }
+}
+
+export function buildOpenAIResponsesRequestBody(params: {
+  model: string
+  messages: (UserMessage | AssistantMessage)[]
+  systemPrompt: SystemPrompt
+  tools: BetaToolUnion[]
+  toolChoice: unknown
+  enableThinking: boolean
+  maxTokens: number
+  temperatureOverride?: number
+}): ResponseCreateParamsStreaming {
+  const {
+    model,
+    messages,
+    systemPrompt,
+    tools,
+    toolChoice,
+    enableThinking,
+    maxTokens,
+    temperatureOverride,
+  } = params
+  const responseInput = anthropicMessagesToResponses(messages, systemPrompt)
+  const responseTools = anthropicToolsToResponses(tools)
+  const responsesToolChoice = anthropicToolChoiceToResponses(toolChoice)
+
+  return {
+    model,
+    stream: true,
+    store: false,
+    input: responseInput.input,
+    ...(responseInput.instructions
+      ? { instructions: responseInput.instructions }
+      : {}),
+    max_output_tokens: maxTokens,
+    ...(responseTools.length > 0 ? { tools: responseTools } : {}),
+    ...(responsesToolChoice ? { tool_choice: responsesToolChoice } : {}),
+    ...(!enableThinking &&
+      temperatureOverride !== undefined && { temperature: temperatureOverride }),
+  }
+}
+
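+// Adapt the Responses API event stream to the Anthropic Beta message-event
+// shape the rest of the pipeline consumes: message_start, content_block_*
+// events, then a message_delta carrying stop_reason/usage and message_stop.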
+export async function* adaptResponsesStreamToAnthropic(
+  stream: Stream<ResponseStreamEvent>,
+  model: string,
+): AsyncGenerator<BetaRawMessageStreamEvent> {
+  const messageId = `msg_${randomUUID().replace(/-/g, '').slice(0, 24)}`
+  let started = false
+  let currentContentIndex = -1
+  let terminalSeen = false
+
+  let inputTokens = 0
+  let outputTokens = 0
+  let cachedReadTokens = 0
+
+  const openTextBlocks = new Map<string, number>()
+  const toolBlocks = new Map<
+    number,
+    { contentIndex: number; id: string; name: string; arguments: string }
+  >()
+  const openBlockIndices = new Set<number>()
+
+  const ensureMessageStart = async function* () {
+    if (started) return
+    started = true
+    yield {
+      type: 'message_start',
+      message: {
+        id: messageId,
+        type: 'message',
+        role: 'assistant',
+        content: [],
+        model,
+        stop_reason: null,
+        stop_sequence: null,
+        usage: {
+          input_tokens: 0,
+          output_tokens: 0,
+          cache_creation_input_tokens: 0,
+          cache_read_input_tokens: 0,
+        },
+      },
+    } as unknown as BetaRawMessageStreamEvent
+  }
+
+  const closeAllTextBlocks = async function* () {
+    for (const [, contentIndex] of openTextBlocks) {
+      if (!openBlockIndices.has(contentIndex)) continue
+      yield {
+        type: 'content_block_stop',
+        index: contentIndex,
+      } as BetaRawMessageStreamEvent
+      openBlockIndices.delete(contentIndex)
+    }
+    openTextBlocks.clear()
+  }
+
+  const closeAllToolBlocks = async function* () {
+    for (const [, block] of toolBlocks) {
+      if (!openBlockIndices.has(block.contentIndex)) continue
+      yield {
+        type: 'content_block_stop',
+        index: block.contentIndex,
+      } as BetaRawMessageStreamEvent
+      openBlockIndices.delete(block.contentIndex)
+    }
+  }
+
+  const ensureToolBlock = async function* (
+    outputIndex: number,
+    options?: { callId?: string; name?: string },
+  ) {
+    const existing = toolBlocks.get(outputIndex)
+    if (existing) {
+      if (options?.name && !existing.name) existing.name = options.name
+      return existing
+    }
+
+    yield* closeAllTextBlocks()
+    currentContentIndex++
+    const block = {
+      contentIndex: currentContentIndex,
+      id:
+        options?.callId ||
+        `toolu_${randomUUID().replace(/-/g, '').slice(0, 24)}`,
+      name: options?.name || '',
+      arguments: '',
+    }
+    toolBlocks.set(outputIndex, block)
+    openBlockIndices.add(block.contentIndex)
+    yield {
+      type: 'content_block_start',
+      index: block.contentIndex,
+      content_block: {
+        type: 'tool_use',
+        id: block.id,
+        name: block.name,
+        input: {},
+      },
+    } as BetaRawMessageStreamEvent
+    return block
+  }
+
+  const ensureTextBlock = async function* (
+    outputIndex: number,
+    contentIndex: number,
+  ) {
+    const key = `${outputIndex}:${contentIndex}`
+    const existing = openTextBlocks.get(key)
+    if (existing !== undefined) return existing
+
+    yield* closeAllToolBlocks()
+    currentContentIndex++
+    openTextBlocks.set(key, currentContentIndex)
+    openBlockIndices.add(currentContentIndex)
+    yield {
+      type: 'content_block_start',
+      index: currentContentIndex,
+      content_block: {
+        type: 'text',
+        text: '',
+      },
+    } as BetaRawMessageStreamEvent
+    return currentContentIndex
+  }
+
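+  // Usage snapshots can arrive on several response.* events; keep the most
+  // recent values rather than summing them.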
+  const updateUsage = (usage: {
+    input_tokens?: number
+    output_tokens?: number
+    input_tokens_details?: { cached_tokens?: number }
+  } | null | undefined) => {
+    if (!usage) return
+    if (typeof usage.input_tokens === 'number') {
+      inputTokens = usage.input_tokens
+    }
+    if (typeof usage.output_tokens === 'number') {
+      outputTokens = usage.output_tokens
+    }
+    if (typeof usage.input_tokens_details?.cached_tokens === 'number') {
+      cachedReadTokens = usage.input_tokens_details.cached_tokens
+    }
+  }
+
+  const emitTerminal = async function* (
+    stopReason: string,
+    usageSource?: {
+      usage?: {
+        input_tokens?: number
+        output_tokens?: number
+        input_tokens_details?: { cached_tokens?: number }
+      } | null
+    },
+  ) {
+    if (terminalSeen) return
+    terminalSeen = true
+    updateUsage(usageSource?.usage)
+    yield* closeAllTextBlocks()
+    yield* closeAllToolBlocks()
+
+    yield {
+      type: 'message_delta',
+      delta: {
+        stop_reason: stopReason,
+        stop_sequence: null,
+      },
+      usage: {
+        input_tokens: inputTokens,
+        output_tokens: outputTokens,
+        cache_creation_input_tokens: 0,
+        cache_read_input_tokens: cachedReadTokens,
+      },
+    } as BetaRawMessageStreamEvent
+
+    yield {
+      type: 'message_stop',
+    } as BetaRawMessageStreamEvent
+  }
+
+  for await (const event of stream) {
+    yield* ensureMessageStart()
+
+    switch (event.type) {
+      case 'response.created':
+        updateUsage(event.response.usage)
+        break
+      case 'response.output_item.added': {
+        const item = event.item
+        if (item.type === 'function_call') {
+          const block = yield* ensureToolBlock(event.output_index, {
+            callId: item.call_id,
+            name: item.name,
+          })
+          if (item.arguments) {
+            block.arguments += item.arguments
+            yield {
+              type: 'content_block_delta',
+              index: block.contentIndex,
+              delta: {
+                type: 'input_json_delta',
+                partial_json: item.arguments,
+              },
+            } as BetaRawMessageStreamEvent
+          }
+        }
+        break
+      }
+      case 'response.output_text.delta': {
+        const contentIndex = yield* ensureTextBlock(
+          event.output_index,
+          event.content_index,
+        )
+        yield {
+          type: 'content_block_delta',
+          index: contentIndex,
+          delta: {
+            type: 'text_delta',
+            text: event.delta,
+          },
+        } as BetaRawMessageStreamEvent
+        break
+      }
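+      // *.done events carry the full item; emit only the argument suffix that
+      // did not already arrive via deltas.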
+      case 'response.output_item.done': {
+        const item = event.item
+        if (item.type === 'function_call') {
+          const block = yield* ensureToolBlock(event.output_index, {
+            callId: item.call_id,
+            name: item.name,
+          })
+          const remainingArgs = item.arguments.slice(block.arguments.length)
+          if (remainingArgs) {
+            block.arguments += remainingArgs
+            yield {
+              type: 'content_block_delta',
+              index: block.contentIndex,
+              delta: {
+                type: 'input_json_delta',
+                partial_json: remainingArgs,
+              },
+            } as BetaRawMessageStreamEvent
+          }
+        } else if (item.type === 'message') {
+          for (let idx = 0; idx < item.content.length; idx++) {
+            const part = item.content[idx]
+            const partText =
+              part.type === 'output_text'
+                ? part.text
+                : part.type === 'refusal'
+                  ? part.refusal
+                  : ''
+            if (
+              (part.type === 'output_text' || part.type === 'refusal') &&
+              partText &&
+              !openTextBlocks.has(`${event.output_index}:${idx}`)
+            ) {
+              const contentIndex = yield* ensureTextBlock(event.output_index, idx)
+              yield {
+                type: 'content_block_delta',
+                index: contentIndex,
+                delta: {
+                  type: 'text_delta',
+                  text: partText,
+                },
+              } as BetaRawMessageStreamEvent
+            }
+          }
+        }
+        break
+      }
+      case 'response.function_call_arguments.delta': {
+        const block = yield* ensureToolBlock(event.output_index)
+        block.arguments += event.delta
+        yield {
+          type: 'content_block_delta',
+          index: block.contentIndex,
+          delta: {
+            type: 'input_json_delta',
+            partial_json: event.delta,
+          },
+        } as BetaRawMessageStreamEvent
+        break
+      }
+      case 'response.function_call_arguments.done': {
+        const block = yield* ensureToolBlock(event.output_index, {
+          name: event.name,
+        })
+        const remainingArgs = event.arguments.slice(block.arguments.length)
+        if (remainingArgs) {
+          block.arguments += remainingArgs
+          yield {
+            type: 'content_block_delta',
+            index: block.contentIndex,
+            delta: {
+              type: 'input_json_delta',
+              partial_json: remainingArgs,
+            },
+          } as BetaRawMessageStreamEvent
+        }
+        break
+      }
+      case 'response.completed':
+        yield* emitTerminal(
+          toolBlocks.size > 0 ? 'tool_use' : 'end_turn',
+          event.response,
+        )
+        break
+      case 'response.incomplete':
+        yield* emitTerminal(
+          mapResponsesStopReason(event.response.incomplete_details?.reason),
+          event.response,
+        )
+        break
+      case 'response.failed':
+        throw new Error(
+          event.response.error?.message || 'Responses API request failed',
+        )
+      case 'error':
+        throw new Error(event.message)
+      default:
+        break
+    }
+  }
+
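+  // The stream can end without a terminal event (e.g. an aborted connection);
+  // synthesize message_start and the terminal events so consumers always see
+  // a complete Anthropic-shaped sequence.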
'tool_use' : 'end_turn') + } +} + +function systemPromptToText(systemPrompt: SystemPrompt): string { + if (!systemPrompt || systemPrompt.length === 0) return '' + return systemPrompt.filter(Boolean).join('\n\n') +} + +function convertUserMessageToResponses(msg: UserMessage): ResponseInputItem[] { + const result: ResponseInputItem[] = [] + const content = msg.message.content + + if (typeof content === 'string') { + if (content.length > 0) { + result.push({ + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: content }], + }) + } + return result + } + + if (!Array.isArray(content)) return result + + const toolOutputs: ResponseInputItem[] = [] + const messageContent: Array = [] + + for (const rawBlock of content as unknown[]) { + const block = rawBlock as any + if (typeof block === 'string') { + if (block.length > 0) { + messageContent.push({ type: 'input_text', text: block }) + } + continue + } + + if (block.type === 'text') { + if (block.text.length > 0) { + messageContent.push({ type: 'input_text', text: block.text }) + } + continue + } + + if (block.type === 'tool_result') { + toolOutputs.push({ + type: 'function_call_output', + call_id: block.tool_use_id, + output: toolResultContentToString(block.content), + }) + continue + } + + if (block.type === 'image') { + const imagePart = convertImageBlockToResponses( + block as unknown as Record, + ) + if (imagePart) messageContent.push(imagePart) + } + } + + result.push(...toolOutputs) + + if (messageContent.length > 0) { + result.push({ + type: 'message', + role: 'user', + content: messageContent, + }) + } + + return result +} + +function convertAssistantMessageToResponses( + msg: AssistantMessage, +): ResponseInputItem[] { + const result: ResponseInputItem[] = [] + const content = msg.message.content + + if (typeof content === 'string') { + if (content.length > 0) { + result.push({ + type: 'message', + role: 'assistant', + content, + }) + } + return result + } + + if (!Array.isArray(content)) return result + + const pendingText: string[] = [] + const flushText = () => { + if (pendingText.length === 0) return + result.push({ + type: 'message', + role: 'assistant', + content: pendingText.join('\n'), + }) + pendingText.length = 0 + } + + for (const rawBlock of content as unknown[]) { + const block = rawBlock as any + if (typeof block === 'string') { + if (block.length > 0) pendingText.push(block) + continue + } + + if (block.type === 'text') { + if (block.text.length > 0) pendingText.push(block.text) + continue + } + + if (block.type === 'tool_use') { + flushText() + result.push({ + type: 'function_call', + call_id: block.id, + name: block.name, + arguments: + typeof block.input === 'string' + ? 
block.input + : JSON.stringify(block.input), + }) + } + } + + flushText() + return result +} + +function toolResultContentToString(content: unknown): string { + if (typeof content === 'string') return content + if (!Array.isArray(content)) return '' + return content + .map(item => { + if (typeof item === 'string') return item + if ( + item && + typeof item === 'object' && + 'text' in item && + typeof item.text === 'string' + ) { + return item.text + } + return '' + }) + .filter(Boolean) + .join('\n') +} + +function convertImageBlockToResponses( + block: Record, +): ResponseInputImage | null { + const source = block.source as Record | undefined + if (!source) return null + + if (source.type === 'base64' && typeof source.data === 'string') { + const mediaType = (source.media_type as string) || 'image/png' + return { + type: 'input_image', + detail: 'auto', + image_url: `data:${mediaType};base64,${source.data}`, + } + } + + if (source.type === 'url' && typeof source.url === 'string') { + return { + type: 'input_image', + detail: 'auto', + image_url: source.url, + } + } + + return null +} + +function sanitizeJsonSchema( + schema: Record, +): Record { + if (!schema || typeof schema !== 'object') return schema + + const result = { ...schema } + + if ('const' in result) { + result.enum = [result.const] + delete result.const + } + + const objectKeys = [ + 'properties', + 'definitions', + '$defs', + 'patternProperties', + ] as const + for (const key of objectKeys) { + const nested = result[key] + if (nested && typeof nested === 'object') { + const sanitized: Record = {} + for (const [k, v] of Object.entries( + nested as Record, + )) { + sanitized[k] = + v && typeof v === 'object' + ? sanitizeJsonSchema(v as Record) + : v + } + result[key] = sanitized + } + } + + const singleKeys = [ + 'items', + 'additionalProperties', + 'not', + 'if', + 'then', + 'else', + 'contains', + 'propertyNames', + ] as const + for (const key of singleKeys) { + const nested = result[key] + if (nested && typeof nested === 'object' && !Array.isArray(nested)) { + result[key] = sanitizeJsonSchema(nested as Record) + } + } + + const arrayKeys = ['anyOf', 'oneOf', 'allOf'] as const + for (const key of arrayKeys) { + const nested = result[key] + if (Array.isArray(nested)) { + result[key] = nested.map(item => + item && typeof item === 'object' + ? 
+function sanitizeJsonSchema(
+  schema: Record<string, unknown>,
+): Record<string, unknown> {
+  if (!schema || typeof schema !== 'object') return schema
+
+  const result = { ...schema }
+
+  if ('const' in result) {
+    result.enum = [result.const]
+    delete result.const
+  }
+
+  const objectKeys = [
+    'properties',
+    'definitions',
+    '$defs',
+    'patternProperties',
+  ] as const
+  for (const key of objectKeys) {
+    const nested = result[key]
+    if (nested && typeof nested === 'object') {
+      const sanitized: Record<string, unknown> = {}
+      for (const [k, v] of Object.entries(
+        nested as Record<string, unknown>,
+      )) {
+        sanitized[k] =
+          v && typeof v === 'object'
+            ? sanitizeJsonSchema(v as Record<string, unknown>)
+            : v
+      }
+      result[key] = sanitized
+    }
+  }
+
+  const singleKeys = [
+    'items',
+    'additionalProperties',
+    'not',
+    'if',
+    'then',
+    'else',
+    'contains',
+    'propertyNames',
+  ] as const
+  for (const key of singleKeys) {
+    const nested = result[key]
+    if (nested && typeof nested === 'object' && !Array.isArray(nested)) {
+      result[key] = sanitizeJsonSchema(nested as Record<string, unknown>)
+    }
+  }
+
+  const arrayKeys = ['anyOf', 'oneOf', 'allOf'] as const
+  for (const key of arrayKeys) {
+    const nested = result[key]
+    if (Array.isArray(nested)) {
+      result[key] = nested.map(item =>
+        item && typeof item === 'object'
+          ? sanitizeJsonSchema(item as Record<string, unknown>)
+          : item,
+      )
+    }
+  }
+
+  return result
+}
+
+function mapResponsesStopReason(reason: string | null | undefined): string {
+  switch (reason) {
+    case 'max_output_tokens':
+      return 'max_tokens'
+    default:
+      return 'end_turn'
+  }
+}
diff --git a/src/utils/managedEnvConstants.ts b/src/utils/managedEnvConstants.ts
index d1976c114..2c33439ce 100644
--- a/src/utils/managedEnvConstants.ts
+++ b/src/utils/managedEnvConstants.ts
@@ -61,6 +61,7 @@ const PROVIDER_MANAGED_ENV_VARS = new Set([
   'OPENAI_API_KEY',
   'OPENAI_BASE_URL',
   'OPENAI_MODEL',
+  'OPENAI_WIRE_API',
   'OPENAI_DEFAULT_HAIKU_MODEL',
   'OPENAI_DEFAULT_HAIKU_MODEL_DESCRIPTION',
   'OPENAI_DEFAULT_HAIKU_MODEL_NAME',
@@ -166,6 +167,7 @@ export const SAFE_ENV_VARS = new Set([
   'OPENAI_DEFAULT_HAIKU_MODEL_DESCRIPTION',
   'OPENAI_DEFAULT_HAIKU_MODEL_NAME',
   'OPENAI_DEFAULT_HAIKU_MODEL_SUPPORTED_CAPABILITIES',
+  'OPENAI_WIRE_API',
   'OPENAI_DEFAULT_OPUS_MODEL',
   'OPENAI_DEFAULT_OPUS_MODEL_DESCRIPTION',
   'OPENAI_DEFAULT_OPUS_MODEL_NAME',
diff --git a/src/utils/settings/types.ts b/src/utils/settings/types.ts
index e7b0bbfb5..eb374aa00 100644
--- a/src/utils/settings/types.ts
+++ b/src/utils/settings/types.ts
@@ -372,8 +372,8 @@ export const SettingsSchema = lazySchema(() =>
       .enum(['anthropic', 'openai', 'gemini', 'grok'])
       .optional()
       .describe(
-        'API provider type. "anthropic" uses the Anthropic API (default), "openai" uses the OpenAI Chat Completions API, "gemini" uses the Gemini API, and "grok" uses the xAI Grok API (OpenAI-compatible). ' +
-          'When set to "openai", configure OPENAI_API_KEY, OPENAI_BASE_URL, and OPENAI_MODEL. When set to "gemini", configure GEMINI_API_KEY and optional GEMINI_BASE_URL. When set to "grok", configure GROK_API_KEY (or XAI_API_KEY), optional GROK_BASE_URL, GROK_MODEL, and GROK_MODEL_MAP.',
+        'API provider type. "anthropic" uses the Anthropic API (default), "openai" uses the OpenAI-compatible API, "gemini" uses the Gemini API, and "grok" uses the xAI Grok API (OpenAI-compatible). ' +
+          'When set to "openai", configure OPENAI_API_KEY, OPENAI_BASE_URL, and OPENAI_MODEL. Set OPENAI_WIRE_API=responses to use the Responses API instead of chat completions. When set to "gemini", configure GEMINI_API_KEY and optional GEMINI_BASE_URL. When set to "grok", configure GROK_API_KEY (or XAI_API_KEY), optional GROK_BASE_URL, GROK_MODEL, and GROK_MODEL_MAP.',
       ),
     model: z
       .string()
@@ -1159,4 +1159,3 @@ export type PluginConfig = {
     [serverName: string]: UserConfigValues
   }
 }
-