Skip to content

Commit 901628b

Browse files
authored
fix: 修复 OpenAI provider (gpt-5.4/gpt-5.3-codex 等模型) 下内建 mcp__plugin_weixin_weixin__reply 微信工具不可见的问题 (#359)
* fix: 修复 OpenAI provider 下 MCP 工具不可见
* docs: 补充 OpenAI MCP 工具列表注释
* fix: 修正 OpenAI Langfuse 输入记录
* refactor: 使用类型守卫收窄 Langfuse role
* fix: 保留 Langfuse OpenAI 数组消息角色
* fix: 合并 Langfuse OpenAI tool_calls
* fix: 修复 OpenAI Langfuse 类型检查
1 parent cf33c06 commit 901628b

5 files changed

Lines changed: 355 additions & 16 deletions

File tree

src/services/api/claude.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1340,7 +1340,10 @@ async function* queryModel(
13401340
// media stripping) but before Anthropic-specific logic (betas, thinking, caching).
13411341
if (getAPIProvider() === 'openai') {
13421342
const { queryModelOpenAI } = await import('./openai/index.js')
1343-
yield* queryModelOpenAI(messagesForAPI, systemPrompt, filteredTools, signal, options)
1343+
// OpenAI emulates Anthropic's dynamic tool loading client-side. It needs
1344+
// the full tool pool so ToolSearchTool can search deferred MCP tools that
1345+
// were intentionally filtered out of the initial API tool list above.
1346+
yield* queryModelOpenAI(messagesForAPI, systemPrompt, tools, signal, options)
13441347
return
13451348
}
13461349

src/services/api/openai/__tests__/queryModelOpenAI.isolated.ts

Lines changed: 117 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,10 +196,52 @@ async function runQueryModel(
196196
// We mock at module level. Bun's mock.module replaces the module for the
197197
// entire file, so we configure the stream per-test via a shared variable.
198198
let _nextEvents: BetaRawMessageStreamEvent[] = []
199+
let _toolSearchEnabled = false
199200

200201
/** Captured arguments from the last chat.completions.create() call */
201202
let _lastCreateArgs: Record<string, any> | null = null
202203

204+
mock.module('@ant/model-provider', () => ({
205+
resolveOpenAIModel: (m: string) => m,
206+
adaptOpenAIStreamToAnthropic: (_stream: any, _model: string) =>
207+
eventStream(_nextEvents),
208+
anthropicMessagesToOpenAI: (messages: any[]) =>
209+
messages.map(msg => ({
210+
role: msg.message?.role ?? 'user',
211+
content: msg.message?.content ?? '',
212+
})),
213+
anthropicToolsToOpenAI: (tools: any[]) =>
214+
tools.map(tool => ({
215+
type: 'function',
216+
function: {
217+
name: tool.name,
218+
description: tool.description ?? '',
219+
parameters: tool.input_schema ?? { type: 'object', properties: {} },
220+
},
221+
})),
222+
anthropicToolChoiceToOpenAI: () => undefined,
223+
}))
224+
225+
mock.module('../../../../utils/envUtils.js', () => ({
226+
isEnvTruthy: (value: string | undefined) =>
227+
value === '1' || value === 'true' || value === 'yes' || value === 'on',
228+
isEnvDefinedFalsy: (value: string | undefined) =>
229+
value === '0' || value === 'false' || value === 'no' || value === 'off',
230+
}))
231+
232+
mock.module('../../../../services/analytics/growthbook.js', () => ({
233+
getFeatureValue_CACHED_MAY_BE_STALE: (_key: string, fallback: unknown) =>
234+
fallback,
235+
}))
236+
237+
mock.module('src/bootstrap/state.js', () => ({
238+
isReplBridgeActive: () => false,
239+
}))
240+
241+
mock.module('bun:bundle', () => ({
242+
feature: () => false,
243+
}))
244+
203245
mock.module('../client.js', () => ({
204246
getOpenAIClient: () => ({
205247
chat: {
@@ -252,6 +294,13 @@ mock.module('../../../../utils/context.js', () => ({
252294
mock.module('../../../../utils/messages.js', () => ({
253295
normalizeMessagesForAPI: (msgs: any) => msgs,
254296
normalizeContentFromAPI: (blocks: any[]) => blocks,
297+
createUserMessage: (opts: any) => ({
298+
type: 'user',
299+
message: { role: 'user', content: opts.content },
300+
uuid: 'user-uuid',
301+
timestamp: new Date().toISOString(),
302+
isMeta: opts.isMeta,
303+
}),
255304
createAssistantAPIErrorMessage: (opts: any) => ({
256305
type: 'assistant',
257306
message: {
@@ -268,8 +317,9 @@ mock.module('../../../../utils/api.js', () => ({
268317
}))
269318

270319
mock.module('../../../../utils/toolSearch.js', () => ({
271-
isToolSearchEnabled: async () => false,
320+
isToolSearchEnabled: async () => _toolSearchEnabled,
272321
extractDiscoveredToolNames: () => new Set(),
322+
isDeferredToolsDeltaEnabled: () => false,
273323
}))
274324

275325
mock.module('../../../../tools/ToolSearchTool/prompt.js', () => ({
@@ -297,6 +347,16 @@ mock.module('../../../../utils/modelCost.js', () => ({
297347
getModelPricingString: () => undefined,
298348
}))
299349

350+
mock.module('../../../../services/langfuse/tracing.js', () => ({
351+
recordLLMObservation: () => {},
352+
}))
353+
354+
mock.module('../../../../services/langfuse/convert.js', () => ({
355+
convertMessagesToLangfuse: () => [],
356+
convertOutputToLangfuse: () => ({}),
357+
convertToolsToLangfuse: () => [],
358+
}))
359+
300360
mock.module('../../../../utils/debug.js', () => ({
301361
logForDebugging: () => {},
302362
logAntError: () => {},
@@ -543,3 +603,59 @@ describe('queryModelOpenAI — max_tokens forwarded to request', () => {
543603
expect(_lastCreateArgs!.max_tokens).toBe(8192)
544604
})
545605
})
606+
607+
describe('queryModelOpenAI — deferred MCP tool visibility', () => {
608+
test('prepends available deferred MCP tools to OpenAI messages', async () => {
609+
_toolSearchEnabled = true
610+
_nextEvents = [makeMessageStart(), makeMessageStop()]
611+
612+
try {
613+
const { queryModelOpenAI } = await import('../index.js')
614+
const tools: any[] = [
615+
{
616+
name: 'ToolSearch',
617+
isMcp: false,
618+
input_schema: { type: 'object', properties: {} },
619+
prompt: async () => 'Search deferred tools',
620+
},
621+
{
622+
name: 'mcp__wechat__send_message',
623+
isMcp: true,
624+
input_schema: { type: 'object', properties: {} },
625+
prompt: async () => 'Send a WeChat message',
626+
},
627+
]
628+
629+
const options: any = {
630+
model: 'test-model',
631+
tools: [],
632+
agents: [],
633+
querySource: 'main_loop',
634+
getToolPermissionContext: async () => ({
635+
alwaysAllow: [],
636+
alwaysDeny: [],
637+
needsPermission: [],
638+
mode: 'default',
639+
isBypassingPermissions: false,
640+
}),
641+
}
642+
643+
for await (const _item of queryModelOpenAI(
644+
[],
645+
{ type: 'text', text: '' } as any,
646+
tools as any,
647+
new AbortController().signal,
648+
options,
649+
)) {
650+
// Exhaust generator so request body is built.
651+
}
652+
653+
expect(_lastCreateArgs).not.toBeNull()
654+
expect(JSON.stringify(_lastCreateArgs!.messages)).toContain(
655+
'<available-deferred-tools>\\nmcp__wechat__send_message\\n</available-deferred-tools>',
656+
)
657+
} finally {
658+
_toolSearchEnabled = false
659+
}
660+
})
661+
})

src/services/api/openai/index.ts

Lines changed: 54 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import type {
55
StreamEvent,
66
SystemAPIErrorMessage,
77
AssistantMessage,
8+
UserMessage,
89
} from '../../../types/message.js'
910
import type { AgentId } from '../../../types/ids.js'
1011
import type { Tools } from '../../../Tool.js'
@@ -32,18 +33,58 @@ import type { Options } from '../claude.js'
3233
import { randomUUID } from 'crypto'
3334
import {
3435
createAssistantAPIErrorMessage,
36+
createUserMessage,
3537
normalizeContentFromAPI,
3638
} from '../../../utils/messages.js'
3739
import type { SDKAssistantMessageError } from '../../../entrypoints/agentSdkTypes.js'
3840
import {
3941
isToolSearchEnabled,
4042
extractDiscoveredToolNames,
43+
isDeferredToolsDeltaEnabled,
4144
} from '../../../utils/toolSearch.js'
4245
import {
46+
formatDeferredToolLine,
4347
isDeferredTool,
4448
TOOL_SEARCH_TOOL_NAME,
4549
} from '@claude-code-best/builtin-tools/tools/ToolSearchTool/prompt.js'
4650

51+
/**
52+
* Mirrors the Anthropic request path's deferred-tool announcement for OpenAI.
53+
*
54+
* OpenAI-compatible endpoints cannot consume Anthropic's `defer_loading` or
55+
* `tool_reference` beta payloads directly, so the model needs the same textual
56+
* list of deferred MCP tool names that Anthropic receives before it can ask
57+
* ToolSearchTool to load their full schemas.
58+
*/
59+
function prependDeferredToolListIfNeeded(
60+
messages: (AssistantMessage | UserMessage)[],
61+
tools: Tools,
62+
deferredToolNames: Set<string>,
63+
useToolSearch: boolean,
64+
): (AssistantMessage | UserMessage)[] {
65+
if (!useToolSearch || isDeferredToolsDeltaEnabled()) return messages
66+
67+
const deferredToolList = tools
68+
.filter(tool => deferredToolNames.has(tool.name))
69+
.map(formatDeferredToolLine)
70+
.sort()
71+
.join('\n')
72+
73+
if (!deferredToolList) return messages
74+
75+
return [
76+
createUserMessage({
77+
content: `<available-deferred-tools>\n${deferredToolList}\n</available-deferred-tools>`,
78+
isMeta: true,
79+
}),
80+
...messages,
81+
]
82+
}
83+
84+
function isOpenAIConvertibleMessage(msg: Message): msg is AssistantMessage | UserMessage {
85+
return msg.type === 'assistant' || msg.type === 'user'
86+
}
87+
4788
/**
4889
* Assemble the final AssistantMessage (and optional max_tokens error) from
4990
* accumulated stream state. Extracted to avoid duplication between the
@@ -176,9 +217,18 @@ export async function* queryModelOpenAI(
176217

177218
// 8. Convert messages and tools to OpenAI format
178219
const enableThinking = isOpenAIThinkingEnabled(openaiModel)
179-
const openaiMessages = anthropicMessagesToOpenAI(messagesForAPI, systemPrompt, {
180-
enableThinking,
181-
})
220+
const openAIConvertibleMessages = messagesForAPI.filter(isOpenAIConvertibleMessage)
221+
const messagesWithDeferredToolList = prependDeferredToolListIfNeeded(
222+
openAIConvertibleMessages,
223+
tools,
224+
deferredToolNames,
225+
useToolSearch,
226+
)
227+
const openaiMessages = anthropicMessagesToOpenAI(
228+
messagesWithDeferredToolList,
229+
systemPrompt,
230+
{ enableThinking },
231+
)
182232
const openaiTools = anthropicToolsToOpenAI(standardTools)
183233
const openaiToolChoice = anthropicToolChoiceToOpenAI(options.toolChoice)
184234

@@ -356,7 +406,7 @@ export async function* queryModelOpenAI(
356406
recordLLMObservation(options.langfuseTrace ?? null, {
357407
model: openaiModel,
358408
provider: 'openai',
359-
input: convertMessagesToLangfuse(messagesForAPI, systemPrompt),
409+
input: convertMessagesToLangfuse(openaiMessages),
360410
output: convertOutputToLangfuse(collectedMessages),
361411
usage: {
362412
input_tokens: usage.input_tokens,

src/services/langfuse/__tests__/langfuse.test.ts

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -184,6 +184,100 @@ describe('Langfuse integration', () => {
184184
})
185185
})
186186

187+
describe('convertMessagesToLangfuse', () => {
188+
test('preserves OpenAI-style messages including deferred tool announcements', async () => {
189+
const { convertMessagesToLangfuse } = await import('../convert.js')
190+
const result = convertMessagesToLangfuse([
191+
{
192+
role: 'system',
193+
content: 'system prompt',
194+
},
195+
{
196+
role: 'user',
197+
content:
198+
'<available-deferred-tools>\nmcp__wechat__send_message\n</available-deferred-tools>',
199+
},
200+
])
201+
202+
expect(result).toEqual([
203+
{ role: 'system', content: 'system prompt' },
204+
{
205+
role: 'user',
206+
content:
207+
'<available-deferred-tools>\nmcp__wechat__send_message\n</available-deferred-tools>',
208+
},
209+
])
210+
})
211+
212+
test('preserves roles for OpenAI-style array content messages', async () => {
213+
const { convertMessagesToLangfuse } = await import('../convert.js')
214+
const result = convertMessagesToLangfuse([
215+
{
216+
role: 'system',
217+
content: [{ type: 'text', text: 'system reminder' }],
218+
},
219+
{
220+
role: 'tool',
221+
tool_call_id: 'call_1',
222+
content: [{ type: 'text', text: 'tool output' }],
223+
},
224+
])
225+
226+
expect(result).toEqual([
227+
{ role: 'system', content: 'system reminder' },
228+
{ role: 'tool', content: 'tool output', tool_call_id: 'call_1' },
229+
])
230+
})
231+
232+
test('merges assistant tool calls from OpenAI-style array content', async () => {
233+
const { convertMessagesToLangfuse } = await import('../convert.js')
234+
const result = convertMessagesToLangfuse([
235+
{
236+
role: 'assistant',
237+
content: [
238+
{
239+
type: 'text',
240+
text: 'calling a tool',
241+
tool_calls: [
242+
{
243+
id: 'call_from_part',
244+
type: 'function',
245+
function: { name: 'part_tool', arguments: '{}' },
246+
},
247+
],
248+
},
249+
],
250+
tool_calls: [
251+
{
252+
id: 'call_from_message',
253+
type: 'function',
254+
function: { name: 'message_tool', arguments: '{"ok":true}' },
255+
},
256+
],
257+
},
258+
])
259+
260+
expect(result).toEqual([
261+
{
262+
role: 'assistant',
263+
content: 'calling a tool',
264+
tool_calls: [
265+
{
266+
id: 'call_from_message',
267+
type: 'function',
268+
function: { name: 'message_tool', arguments: '{"ok":true}' },
269+
},
270+
{
271+
id: 'call_from_part',
272+
type: 'function',
273+
function: { name: 'part_tool', arguments: '{}' },
274+
},
275+
],
276+
},
277+
])
278+
})
279+
})
280+
187281
// ── client tests ────────────────────────────────────────────────────────────
188282

189283
describe('isLangfuseEnabled', () => {

0 commit comments

Comments
 (0)