Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 19 additions & 5 deletions astrbot/core/provider/sources/openai_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -889,11 +889,25 @@ async def _parse_openai_completion(
and not has_reasoning_output
and not llm_response.tools_call_args
):
logger.error(f"OpenAI completion has no usable output: {completion}.")
raise EmptyModelOutputError(
"OpenAI completion has no usable output. "
f"response_id={completion.id}, finish_reason={choice.finish_reason}"
)
# Some models (e.g. GPT-5 series) complete normally but put all
# tokens into internal reasoning, leaving content/reasoning_content
# both None. When finish_reason is "stop" this is not an error.
if choice.finish_reason == "stop":
Comment on lines +892 to +895
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🚨 suggestion (security): Logging the full completion object in the error path may be heavy and could expose more data than necessary.

In the error branch, the full completion object is logged, which increases log volume and risks exposing sensitive payloads. Consider logging only key fields (e.g., response id, model, choice index, finish reason), and reserve full-object dumps for debug-level logs when strictly necessary.

Suggested implementation:

            # Some models (e.g. GPT-5 series) complete normally but put all
            # tokens into internal reasoning, leaving content/reasoning_content
            # both None.  When finish_reason is "stop" this is not an error.
            if choice.finish_reason == "stop":
                logger.warning(
                    "OpenAI completion returned no visible content "
                    f"(response_id={completion.id}, model={completion.model}, "
                    f"choice_index={choice.index}, finish_reason={choice.finish_reason}, "
                    "marker=internal_reasoning_only). "
                    "The model may have used internal reasoning only."
                )
            else:
                # Log only key metadata at error level to avoid dumping the full completion.
                logger.error(
                    "OpenAI completion has no usable output. "
                    f"(response_id={completion.id}, model={completion.model}, "
                    f"choice_index={choice.index}, finish_reason={choice.finish_reason})"
                )
                # Full completion object is logged at debug level only, to limit log volume
                # and exposure of sensitive payload data.
                logger.debug("OpenAI completion with no usable output: %r", completion)
                raise EmptyModelOutputError(
                    "OpenAI completion has no usable output. "
                    f"response_id={completion.id}, finish_reason={choice.finish_reason}"
                )

If the surrounding code uses a different logging style (e.g., logger.error("...", extra={...}) or structured logging helpers), you may want to adapt the new logger.error and logger.debug calls to match that convention. Also ensure EmptyModelOutputError is imported in this module if it is not already.

logger.warning(
"OpenAI completion returned no visible content "
f"(response_id={completion.id}, model={completion.model}, "
f"choice_index={choice.index}, finish_reason={choice.finish_reason}, "
"marker=internal_reasoning_only). "
"The model may have used internal reasoning only."
)
if not llm_response.result_chain:
llm_response.result_chain = MessageChain().message("")
else:
logger.error(f"OpenAI completion has no usable output: {completion}.")
raise EmptyModelOutputError(
"OpenAI completion has no usable output. "
f"response_id={completion.id}, finish_reason={choice.finish_reason}"
)

llm_response.raw_completion = completion
llm_response.id = completion.id
Expand Down
41 changes: 40 additions & 1 deletion tests/test_openai_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -1142,6 +1142,7 @@ async def fake_create(**kwargs):

@pytest.mark.asyncio
async def test_parse_openai_completion_raises_empty_model_output_error():
"""Empty output with non-stop finish_reason should raise."""
provider = _make_provider()
try:
completion = ChatCompletion.model_validate(
Expand All @@ -1159,7 +1160,7 @@ async def test_parse_openai_completion_raises_empty_model_output_error():
"refusal": None,
"tool_calls": None,
},
"finish_reason": "stop",
"finish_reason": "length",
}
],
"usage": {
Expand All @@ -1176,6 +1177,44 @@ async def test_parse_openai_completion_raises_empty_model_output_error():
await provider.terminate()


@pytest.mark.asyncio
async def test_parse_openai_completion_empty_content_finish_stop_no_error():
    """Empty output with finish_reason=stop should return gracefully (GPT-5 etc.)."""
    provider = _make_provider()
    try:
        # A completion whose message carries no visible content/tool calls but
        # finished with "stop" — the internal-reasoning-only case that must not
        # raise EmptyModelOutputError.
        payload = {
            "id": "chatcmpl-empty-stop",
            "object": "chat.completion",
            "created": 0,
            "model": "gpt-5.4",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": None,
                        "refusal": None,
                        "tool_calls": None,
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {
                "prompt_tokens": 13,
                "completion_tokens": 18,
                "total_tokens": 31,
            },
        }
        empty_stop_completion = ChatCompletion.model_validate(payload)

        parsed = await provider._parse_openai_completion(
            empty_stop_completion, tools=None
        )
        # Graceful path: an assistant response with a (possibly empty) chain.
        assert parsed.role == "assistant"
        assert parsed.result_chain is not None
    finally:
        await provider.terminate()


@pytest.mark.asyncio
async def test_query_stream_extracts_usage_from_empty_choices_chunk(monkeypatch):
provider = _make_provider()
Expand Down
Loading