1717logger = logging .getLogger (__name__ )
1818
1919
def _extract_content(response: ChatCompletion) -> str:
    """Return the assistant text from *response*, tolerating reasoning models.

    Reasoning-style models (e.g. MiniMax-M2.7, DeepSeek-R1) may leave the usual
    ``content`` field empty and place their output in ``reasoning_content``
    instead; fall back to that field when ``content`` is missing or empty.
    An empty string is returned when neither field holds text.
    """
    message = response.choices[0].message
    # Prefer the standard field; getattr guards SDKs whose message type
    # does not define reasoning_content at all.
    text = message.content or getattr(message, "reasoning_content", None)
    return text or ""
31+
32+
2033class OpenAISDKClient :
2134 """OpenAI LLM client that relies on the official Python SDK."""
2235
@@ -59,9 +72,9 @@ async def chat(
5972 temperature = temperature ,
6073 max_tokens = max_tokens ,
6174 )
62- content = response . choices [ 0 ]. message . content
75+ content = _extract_content ( response )
6376 logger .debug ("OpenAI chat response: %s" , response )
64- return content or "" , response
77+ return content , response
6578
6679 async def summarize (
6780 self ,
@@ -82,9 +95,9 @@ async def summarize(
8295 temperature = 1 ,
8396 max_tokens = max_tokens ,
8497 )
85- content = response . choices [ 0 ]. message . content
98+ content = _extract_content ( response )
8699 logger .debug ("OpenAI summarize response: %s" , response )
87- return content or "" , response
100+ return content , response
88101
89102 async def vision (
90103 self ,
@@ -148,9 +161,9 @@ async def vision(
148161 temperature = 1 ,
149162 max_tokens = max_tokens ,
150163 )
151- content = response . choices [ 0 ]. message . content
164+ content = _extract_content ( response )
152165 logger .debug ("OpenAI vision response: %s" , response )
153- return content or "" , response
166+ return content , response
154167
155168 async def embed (self , inputs : list [str ]) -> tuple [list [list [float ]], CreateEmbeddingResponse | None ]:
156169 """Create text embeddings via the official SDK."""
0 commit comments