Skip to content

Commit 80d5efd

Browse files
authored
fix: empty model output error may misfire when using Gemini (#7377)
1 parent ff299f7 commit 80d5efd

File tree

1 file changed

+24
-13
lines changed

1 file changed

+24
-13
lines changed

astrbot/core/provider/sources/gemini_source.py

Lines changed: 24 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -461,14 +461,19 @@ def _process_content_parts(
461461
self,
462462
candidate: types.Candidate,
463463
llm_response: LLMResponse,
464+
*,
465+
validate_output: bool = True,
464466
) -> MessageChain:
465467
"""处理内容部分并构建消息链"""
466468
if not candidate.content:
467469
logger.warning(f"收到的 candidate.content 为空: {candidate}")
468-
raise EmptyModelOutputError(
469-
"Gemini candidate content is empty. "
470-
f"finish_reason={candidate.finish_reason}"
471-
)
470+
if validate_output:
471+
raise EmptyModelOutputError(
472+
"Gemini candidate content is empty. "
473+
f"finish_reason={candidate.finish_reason}"
474+
)
475+
llm_response.result_chain = MessageChain(chain=[])
476+
return llm_response.result_chain
472477

473478
finish_reason = candidate.finish_reason
474479
result_parts: list[types.Part] | None = candidate.content.parts
@@ -490,10 +495,13 @@ def _process_content_parts(
490495

491496
if not result_parts:
492497
logger.warning(f"收到的 candidate.content.parts 为空: {candidate}")
493-
raise EmptyModelOutputError(
494-
"Gemini candidate content parts are empty. "
495-
f"finish_reason={candidate.finish_reason}"
496-
)
498+
if validate_output:
499+
raise EmptyModelOutputError(
500+
"Gemini candidate content parts are empty. "
501+
f"finish_reason={candidate.finish_reason}"
502+
)
503+
llm_response.result_chain = MessageChain(chain=[])
504+
return llm_response.result_chain
497505

498506
# 提取 reasoning content
499507
reasoning = self._extract_reasoning_content(candidate)
@@ -550,11 +558,12 @@ def _process_content_parts(
550558
llm_response.reasoning_signature = base64.b64encode(ts).decode("utf-8")
551559
chain_result = MessageChain(chain=chain)
552560
llm_response.result_chain = chain_result
553-
self._ensure_usable_response(
554-
llm_response,
555-
response_id=None,
556-
finish_reason=str(finish_reason) if finish_reason is not None else None,
557-
)
561+
if validate_output:
562+
self._ensure_usable_response(
563+
llm_response,
564+
response_id=None,
565+
finish_reason=str(finish_reason) if finish_reason is not None else None,
566+
)
558567
return chain_result
559568

560569
async def _query(self, payloads: dict, tools: ToolSet | None) -> LLMResponse:
@@ -708,6 +717,7 @@ async def _query_stream(
708717
llm_response.result_chain = self._process_content_parts(
709718
chunk.candidates[0],
710719
llm_response,
720+
validate_output=False,
711721
)
712722
llm_response.id = chunk.response_id
713723
if chunk.usage_metadata:
@@ -738,6 +748,7 @@ async def _query_stream(
738748
final_response.result_chain = self._process_content_parts(
739749
chunk.candidates[0],
740750
final_response,
751+
validate_output=False,
741752
)
742753
final_response.id = chunk.response_id
743754
if chunk.usage_metadata:

0 commit comments

Comments
 (0)