Skip to content

Commit 67b4b74

Browse files
committed
fix(responses): complete done-terminated streams without finish_reason
Allow response.completed to be emitted on [DONE] when the stream already produced valid Responses output, so clean EOF does not trigger false retry loops.
1 parent f239628 commit 67b4b74

File tree

2 files changed

+110
-8
lines changed

2 files changed

+110
-8
lines changed

internal/translator/openai/openai/responses/openai_openai-responses_response.go

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,19 @@ func emitRespEvent(event string, payload []byte) []byte {
6262
return translatorcommon.SSEEventData(event, payload)
6363
}
6464

65+
func hasCompletedResponseOutput(st *oaiToResponsesState) bool {
66+
if st == nil {
67+
return false
68+
}
69+
if len(st.MsgItemAdded) > 0 || len(st.FuncCallIDs) > 0 || len(st.Reasonings) > 0 {
70+
return true
71+
}
72+
if st.ReasoningID != "" && st.ReasoningBuf.Len() > 0 {
73+
return true
74+
}
75+
return false
76+
}
77+
6578
func buildResponsesCompletedEvent(st *oaiToResponsesState, requestRawJSON []byte, nextSeq func() int) []byte {
6679
completed := []byte(`{"type":"response.completed","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"completed","background":false,"error":null}}`)
6780
completed, _ = sjson.SetBytes(completed, "sequence_number", nextSeq())
@@ -146,6 +159,12 @@ func buildResponsesCompletedEvent(st *oaiToResponsesState, requestRawJSON []byte
146159
outputItems = append(outputItems, completedOutputItem{index: r.OutputIndex, raw: item})
147160
}
148161
}
162+
if st.ReasoningID != "" {
163+
item := []byte(`{"id":"","type":"reasoning","summary":[{"type":"summary_text","text":""}]}`)
164+
item, _ = sjson.SetBytes(item, "id", st.ReasoningID)
165+
item, _ = sjson.SetBytes(item, "summary.0.text", st.ReasoningBuf.String())
166+
outputItems = append(outputItems, completedOutputItem{index: st.ReasoningIndex, raw: item})
167+
}
149168
if len(st.MsgItemAdded) > 0 {
150169
for i := range st.MsgItemAdded {
151170
txt := ""
@@ -227,7 +246,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
227246
return [][]byte{}
228247
}
229248
if bytes.Equal(rawJSON, []byte("[DONE]")) {
230-
if st.CompletionPending && !st.CompletedEmitted {
249+
if !st.CompletedEmitted && (st.CompletionPending || hasCompletedResponseOutput(st)) {
231250
st.CompletedEmitted = true
232251
return [][]byte{buildResponsesCompletedEvent(st, requestRawJSON, func() int { st.Seq++; return st.Seq })}
233252
}

internal/translator/openai/openai/responses/openai_openai-responses_response_test.go

Lines changed: 90 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -30,13 +30,17 @@ func TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_ResponseCompleted
3030
request := []byte(`{"model":"gpt-5.4","tool_choice":"auto","parallel_tool_calls":true}`)
3131

3232
tests := []struct {
33-
name string
34-
in []string
35-
doneInputIndex int // Index in tt.in where the terminal [DONE] chunk arrives and response.completed must be emitted.
36-
hasUsage bool
37-
inputTokens int64
38-
outputTokens int64
39-
totalTokens int64
33+
name string
34+
in []string
35+
doneInputIndex int // Index in tt.in where the terminal [DONE] chunk arrives and response.completed must be emitted.
36+
hasUsage bool
37+
inputTokens int64
38+
outputTokens int64
39+
totalTokens int64
40+
wantMessageText string
41+
wantFunctionCallID string
42+
wantFunctionCallName string
43+
wantFunctionArguments string
4044
}{
4145
{
4246
// A provider may send finish_reason first and only attach usage in a later chunk (e.g. Vertex AI),
@@ -138,6 +142,85 @@ func TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_ResponseCompleted
138142
}
139143
}
140144

145+
// TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_MessageWithoutFinishReasonCompletesOnDone
// verifies that a text-message stream whose chunks never carry a
// finish_reason still emits response.completed when the terminal [DONE]
// sentinel arrives, and that the completed payload contains the streamed
// message text. This pins the fix for clean-EOF streams being mistaken for
// truncated ones (which previously triggered false retry loops).
func TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_MessageWithoutFinishReasonCompletesOnDone(t *testing.T) {
	t.Parallel()

	request := []byte(`{"model":"gpt-5.4","tool_choice":"auto","parallel_tool_calls":true}`)
	// One content delta with finish_reason:null, then a bare [DONE] — no
	// chunk ever declares the choice finished.
	in := []string{
		`data: {"id":"resp_msg_no_finish","object":"chat.completion.chunk","created":1773896263,"model":"model","choices":[{"index":0,"delta":{"role":"assistant","content":"hello world","reasoning_content":null,"tool_calls":null},"finish_reason":null}]}`,
		`data: [DONE]`,
	}

	var param any
	// completedInputIndex records which input line produced the
	// response.completed event; -1 means it was never emitted.
	completedInputIndex := -1
	var completedData gjson.Result
	for i, line := range in {
		for _, chunk := range ConvertOpenAIChatCompletionsResponseToOpenAIResponses(context.Background(), "model", request, request, []byte(line), &param) {
			event, data := parseOpenAIResponsesSSEEvent(t, chunk)
			if event != "response.completed" {
				continue
			}
			completedInputIndex = i
			completedData = data
		}
	}

	// The completed event must come from the [DONE] line (index 1), not earlier.
	if completedInputIndex != 1 {
		t.Fatalf("expected response.completed on terminal [DONE] chunk at input index 1, got %d", completedInputIndex)
	}
	if got := completedData.Get("response.output.0.type").String(); got != "message" {
		t.Fatalf("unexpected response.output.0.type: got %q want %q", got, "message")
	}
	// The streamed delta text must survive into the final output item.
	if got := completedData.Get("response.output.0.content.0.text").String(); got != "hello world" {
		t.Fatalf("unexpected response.output.0.content.0.text: got %q want %q", got, "hello world")
	}
}
178+
179+
// TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_FunctionCallWithoutFinishReasonCompletesOnDone
// verifies that a tool-call stream whose chunks never carry a finish_reason
// still emits response.completed on the terminal [DONE], with the function
// call's id, name, and accumulated arguments intact. The arguments contain
// escaped Windows path backslashes to also pin correct JSON escaping through
// the translation.
func TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_FunctionCallWithoutFinishReasonCompletesOnDone(t *testing.T) {
	t.Parallel()

	request := []byte(`{"model":"gpt-5.4","tool_choice":"auto","parallel_tool_calls":true}`)
	// Chunk 1 opens the tool call (id/name), chunk 2 streams the arguments,
	// and neither sets finish_reason before the [DONE] sentinel.
	in := []string{
		`data: {"id":"resp_func_no_finish","object":"chat.completion.chunk","created":1773896263,"model":"model","choices":[{"index":0,"delta":{"role":"assistant","content":null,"reasoning_content":null,"tool_calls":[{"index":0,"id":"call_func_no_finish","type":"function","function":{"name":"read","arguments":""}}]},"finish_reason":null}]}`,
		`data: {"id":"resp_func_no_finish","object":"chat.completion.chunk","created":1773896263,"model":"model","choices":[{"index":0,"delta":{"role":null,"content":null,"reasoning_content":null,"tool_calls":[{"index":0,"function":{"arguments":"{\"filePath\":\"C:\\\\repo\\\\README.md\"}"}}]},"finish_reason":null}]}`,
		`data: [DONE]`,
	}

	var param any
	// completedInputIndex records which input line produced the
	// response.completed event; -1 means it was never emitted.
	completedInputIndex := -1
	var completedData gjson.Result
	for i, line := range in {
		for _, chunk := range ConvertOpenAIChatCompletionsResponseToOpenAIResponses(context.Background(), "model", request, request, []byte(line), &param) {
			event, data := parseOpenAIResponsesSSEEvent(t, chunk)
			if event != "response.completed" {
				continue
			}
			completedInputIndex = i
			completedData = data
		}
	}

	// The completed event must come from the [DONE] line (index 2), not earlier.
	if completedInputIndex != 2 {
		t.Fatalf("expected response.completed on terminal [DONE] chunk at input index 2, got %d", completedInputIndex)
	}
	if got := completedData.Get("response.output.0.type").String(); got != "function_call" {
		t.Fatalf("unexpected response.output.0.type: got %q want %q", got, "function_call")
	}
	if got := completedData.Get("response.output.0.call_id").String(); got != "call_func_no_finish" {
		t.Fatalf("unexpected response.output.0.call_id: got %q want %q", got, "call_func_no_finish")
	}
	if got := completedData.Get("response.output.0.name").String(); got != "read" {
		t.Fatalf("unexpected response.output.0.name: got %q want %q", got, "read")
	}
	// Arguments must be a valid JSON document, not a doubly-escaped string.
	args := completedData.Get("response.output.0.arguments").String()
	if !gjson.Valid(args) {
		t.Fatalf("expected response.output.0.arguments to be valid JSON, got %q", args)
	}
	// Escaped backslashes in the streamed arguments must decode back to a
	// plain Windows path.
	if got := gjson.Get(args, "filePath").String(); got != `C:\repo\README.md` {
		t.Fatalf("unexpected response.output.0.arguments.filePath: got %q want %q", got, `C:\repo\README.md`)
	}
}
223+
141224
func TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_MultipleToolCallsRemainSeparate(t *testing.T) {
142225
in := []string{
143226
`data: {"id":"resp_test","object":"chat.completion.chunk","created":1773896263,"model":"model","choices":[{"index":0,"delta":{"role":"assistant","content":null,"reasoning_content":null,"tool_calls":[{"index":0,"id":"call_read","type":"function","function":{"name":"read","arguments":""}}]},"finish_reason":null}]}`,

0 commit comments

Comments
 (0)