Skip to content

Commit 8d9a49b

Browse files
committed
fix: add chat_id parameter to _yield_mcp_response and update tool call handling
1 parent 651378d commit 8d9a49b

File tree

2 files changed: +116 additions, −116 deletions

apps/application/flow/tools.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,7 @@ async def _initialize_skills(mcp_servers, temp_dir):
354354

355355

356356
async def _yield_mcp_response(chat_model, message_list, mcp_servers, mcp_output_enable=True, tool_init_params={},
357-
source_id=None, source_type=None, temp_dir=None):
357+
source_id=None, source_type=None, temp_dir=None, chat_id=None):
358358
try:
359359
checkpointer = MemorySaver()
360360
client, skills_dir = await _initialize_skills(mcp_servers, temp_dir)
@@ -374,7 +374,7 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers, mcp_output_
374374
recursion_limit = int(CONFIG.get("LANGCHAIN_GRAPH_RECURSION_LIMIT", '100'))
375375
response = agent.astream(
376376
{"messages": message_list},
377-
config={"recursion_limit": recursion_limit, "configurable": {"thread_id": str(uuid.uuid7())}},
377+
config={"recursion_limit": recursion_limit, "configurable": {"thread_id": chat_id}},
378378
stream_mode='messages'
379379
)
380380

@@ -548,7 +548,7 @@ def mcp_response_generator(chat_model, message_list, mcp_servers, mcp_output_ena
548548
async def _run():
549549
try:
550550
async_gen = _yield_mcp_response(chat_model, message_list, mcp_servers, mcp_output_enable, tool_init_params,
551-
source_id, source_type, temp_dir)
551+
source_id, source_type, temp_dir, chat_id)
552552
async for chunk in async_gen:
553553
result_queue.put(('data', chunk))
554554
except Exception as e:

apps/models_provider/impl/base_chat_open_ai.py

Lines changed: 113 additions & 113 deletions
Original file line numberDiff line numberDiff line change
@@ -13,74 +13,74 @@
1313
from langchain_core.runnables import RunnableConfig, ensure_config
1414
from langchain_core.tools import BaseTool
1515
from langchain_openai import ChatOpenAI
16-
from langchain_openai.chat_models.base import _create_usage_metadata
16+
from langchain_openai.chat_models.base import _create_usage_metadata, _convert_delta_to_message_chunk
1717

1818
from common.config.tokenizer_manage_config import TokenizerManage
1919
from common.utils.logger import maxkb_logger
2020

2121
def custom_get_token_ids(text: str):
2222
tokenizer = TokenizerManage.get_tokenizer()
2323
return tokenizer.encode(text)
24-
25-
26-
def _convert_delta_to_message_chunk(
27-
_dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
28-
) -> BaseMessageChunk:
29-
id_ = _dict.get("id")
30-
role = cast(str, _dict.get("role"))
31-
content = cast(str, _dict.get("content") or "")
32-
additional_kwargs: dict = {}
33-
if 'reasoning_content' in _dict:
34-
additional_kwargs['reasoning_content'] = _dict.get('reasoning_content')
35-
if _dict.get("function_call"):
36-
function_call = dict(_dict["function_call"])
37-
if "name" in function_call and function_call["name"] is None:
38-
function_call["name"] = ""
39-
additional_kwargs["function_call"] = function_call
40-
tool_call_chunks = []
41-
if raw_tool_calls := _dict.get("tool_calls"):
42-
additional_kwargs["tool_calls"] = raw_tool_calls
43-
try:
44-
tool_call_chunks = [
45-
tool_call_chunk(
46-
name=rtc["function"].get("name"),
47-
args=rtc["function"].get("arguments"),
48-
id=rtc.get("id"),
49-
index=rtc["index"],
50-
)
51-
for rtc in raw_tool_calls
52-
]
53-
except KeyError:
54-
pass
55-
56-
if role == "user" or default_class == HumanMessageChunk:
57-
return HumanMessageChunk(content=content, id=id_)
58-
elif role == "assistant" or default_class == AIMessageChunk:
59-
return AIMessageChunk(
60-
content=content,
61-
additional_kwargs=additional_kwargs,
62-
id=id_,
63-
tool_call_chunks=tool_call_chunks, # type: ignore[arg-type]
64-
)
65-
elif role in ("system", "developer") or default_class == SystemMessageChunk:
66-
if role == "developer":
67-
additional_kwargs = {"__openai_role__": "developer"}
68-
else:
69-
additional_kwargs = {}
70-
return SystemMessageChunk(
71-
content=content, id=id_, additional_kwargs=additional_kwargs
72-
)
73-
elif role == "function" or default_class == FunctionMessageChunk:
74-
return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
75-
elif role == "tool" or default_class == ToolMessageChunk:
76-
return ToolMessageChunk(
77-
content=content, tool_call_id=_dict["tool_call_id"], id=id_
78-
)
79-
elif role or default_class == ChatMessageChunk:
80-
return ChatMessageChunk(content=content, role=role, id=id_)
81-
else:
82-
return default_class(content=content, id=id_) # type: ignore
83-
24+
#
25+
#
26+
# def _convert_delta_to_message_chunk(
27+
# _dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
28+
# ) -> BaseMessageChunk:
29+
# id_ = _dict.get("id")
30+
# role = cast(str, _dict.get("role"))
31+
# content = cast(str, _dict.get("content") or "")
32+
# additional_kwargs: dict = {}
33+
# if 'reasoning_content' in _dict:
34+
# additional_kwargs['reasoning_content'] = _dict.get('reasoning_content')
35+
# if _dict.get("function_call"):
36+
# function_call = dict(_dict["function_call"])
37+
# if "name" in function_call and function_call["name"] is None:
38+
# function_call["name"] = ""
39+
# additional_kwargs["function_call"] = function_call
40+
# tool_call_chunks = []
41+
# if raw_tool_calls := _dict.get("tool_calls"):
42+
# additional_kwargs["tool_calls"] = raw_tool_calls
43+
# try:
44+
# tool_call_chunks = [
45+
# tool_call_chunk(
46+
# name=rtc["function"].get("name"),
47+
# args=rtc["function"].get("arguments"),
48+
# id=rtc.get("id"),
49+
# index=rtc["index"],
50+
# )
51+
# for rtc in raw_tool_calls
52+
# ]
53+
# except KeyError:
54+
# pass
55+
#
56+
# if role == "user" or default_class == HumanMessageChunk:
57+
# return HumanMessageChunk(content=content, id=id_)
58+
# elif role == "assistant" or default_class == AIMessageChunk:
59+
# return AIMessageChunk(
60+
# content=content,
61+
# additional_kwargs=additional_kwargs,
62+
# id=id_,
63+
# tool_call_chunks=tool_call_chunks, # type: ignore[arg-type]
64+
# )
65+
# elif role in ("system", "developer") or default_class == SystemMessageChunk:
66+
# if role == "developer":
67+
# additional_kwargs = {"__openai_role__": "developer"}
68+
# else:
69+
# additional_kwargs = {}
70+
# return SystemMessageChunk(
71+
# content=content, id=id_, additional_kwargs=additional_kwargs
72+
# )
73+
# elif role == "function" or default_class == FunctionMessageChunk:
74+
# return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
75+
# elif role == "tool" or default_class == ToolMessageChunk:
76+
# return ToolMessageChunk(
77+
# content=content, tool_call_id=_dict["tool_call_id"], id=id_
78+
# )
79+
# elif role or default_class == ChatMessageChunk:
80+
# return ChatMessageChunk(content=content, role=role, id=id_)
81+
# else:
82+
# return default_class(content=content, id=id_) # type: ignore
83+
#
8484

8585
class BaseChatOpenAI(ChatOpenAI):
8686
usage_metadata: dict = {}
@@ -131,58 +131,58 @@ def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
131131
self.usage_metadata = chunk.message.usage_metadata
132132
yield chunk
133133

134-
def _convert_chunk_to_generation_chunk(
135-
self,
136-
chunk: dict,
137-
default_chunk_class: type,
138-
base_generation_info: Optional[dict],
139-
) -> Optional[ChatGenerationChunk]:
140-
if chunk.get("type") == "content.delta": # from beta.chat.completions.stream
141-
return None
142-
token_usage = chunk.get("usage")
143-
choices = (
144-
chunk.get("choices", [])
145-
# from beta.chat.completions.stream
146-
or chunk.get("chunk", {}).get("choices", [])
147-
)
148-
149-
usage_metadata: Optional[UsageMetadata] = (
150-
_create_usage_metadata(token_usage) if token_usage and token_usage.get("prompt_tokens") else None
151-
)
152-
if len(choices) == 0:
153-
# logprobs is implicitly None
154-
generation_chunk = ChatGenerationChunk(
155-
message=default_chunk_class(content="", usage_metadata=usage_metadata)
156-
)
157-
return generation_chunk
158-
159-
choice = choices[0]
160-
if choice["delta"] is None:
161-
return None
162-
163-
message_chunk = _convert_delta_to_message_chunk(
164-
choice["delta"], default_chunk_class
165-
)
166-
generation_info = {**base_generation_info} if base_generation_info else {}
167-
168-
if finish_reason := choice.get("finish_reason"):
169-
generation_info["finish_reason"] = finish_reason
170-
if model_name := chunk.get("model"):
171-
generation_info["model_name"] = model_name
172-
if system_fingerprint := chunk.get("system_fingerprint"):
173-
generation_info["system_fingerprint"] = system_fingerprint
174-
175-
logprobs = choice.get("logprobs")
176-
if logprobs:
177-
generation_info["logprobs"] = logprobs
178-
179-
if usage_metadata and isinstance(message_chunk, AIMessageChunk):
180-
message_chunk.usage_metadata = usage_metadata
181-
182-
generation_chunk = ChatGenerationChunk(
183-
message=message_chunk, generation_info=generation_info or None
184-
)
185-
return generation_chunk
134+
# def _convert_chunk_to_generation_chunk(
135+
# self,
136+
# chunk: dict,
137+
# default_chunk_class: type,
138+
# base_generation_info: Optional[dict],
139+
# ) -> Optional[ChatGenerationChunk]:
140+
# if chunk.get("type") == "content.delta": # from beta.chat.completions.stream
141+
# return None
142+
# token_usage = chunk.get("usage")
143+
# choices = (
144+
# chunk.get("choices", [])
145+
# # from beta.chat.completions.stream
146+
# or chunk.get("chunk", {}).get("choices", [])
147+
# )
148+
#
149+
# usage_metadata: Optional[UsageMetadata] = (
150+
# _create_usage_metadata(token_usage) if token_usage and token_usage.get("prompt_tokens") else None
151+
# )
152+
# if len(choices) == 0:
153+
# # logprobs is implicitly None
154+
# generation_chunk = ChatGenerationChunk(
155+
# message=default_chunk_class(content="", usage_metadata=usage_metadata)
156+
# )
157+
# return generation_chunk
158+
#
159+
# choice = choices[0]
160+
# if choice["delta"] is None:
161+
# return None
162+
#
163+
# message_chunk = _convert_delta_to_message_chunk(
164+
# choice["delta"], default_chunk_class
165+
# )
166+
# generation_info = {**base_generation_info} if base_generation_info else {}
167+
#
168+
# if finish_reason := choice.get("finish_reason"):
169+
# generation_info["finish_reason"] = finish_reason
170+
# if model_name := chunk.get("model"):
171+
# generation_info["model_name"] = model_name
172+
# if system_fingerprint := chunk.get("system_fingerprint"):
173+
# generation_info["system_fingerprint"] = system_fingerprint
174+
#
175+
# logprobs = choice.get("logprobs")
176+
# if logprobs:
177+
# generation_info["logprobs"] = logprobs
178+
#
179+
# if usage_metadata and isinstance(message_chunk, AIMessageChunk):
180+
# message_chunk.usage_metadata = usage_metadata
181+
#
182+
# generation_chunk = ChatGenerationChunk(
183+
# message=message_chunk, generation_info=generation_info or None
184+
# )
185+
# return generation_chunk
186186

187187
def invoke(
188188
self,

0 commit comments

Comments (0)