|
17 | 17 | from openai.types.chat.chat_completion_chunk import ChoiceDelta |
18 | 18 | from openai.types.completion_usage import CompletionUsage, PromptTokensDetails |
19 | 19 | from openai.types.responses import Response, ResponseCompletedEvent, ResponseOutputMessage |
| 20 | +from openai.types.responses.response_error_event import ResponseErrorEvent |
| 21 | +from openai.types.responses.response_failed_event import ResponseFailedEvent |
| 22 | +from openai.types.responses.response_incomplete_event import ResponseIncompleteEvent |
20 | 23 | from openai.types.responses.response_output_text import ResponseOutputText |
21 | 24 | from openai.types.responses.response_usage import ( |
22 | 25 | InputTokensDetails, |
|
28 | 31 | from agents import ( |
29 | 32 | Agent, |
30 | 33 | Handoff, |
| 34 | + ModelBehaviorError, |
31 | 35 | ModelSettings, |
32 | 36 | ModelTracing, |
33 | 37 | Tool, |
@@ -534,6 +538,91 @@ async def response_stream() -> AsyncIterator[ResponseCompletedEvent]: |
534 | 538 | assert provider.private_responses_calls[0]["params"].conversation == "conv_123" |
535 | 539 |
|
536 | 540 |
|
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("terminal_event_type", "terminal_event_cls"),
    [
        ("response.incomplete", ResponseIncompleteEvent),
        ("response.failed", ResponseFailedEvent),
    ],
)
async def test_any_llm_responses_stream_rejects_failed_terminal_events(
    monkeypatch,
    terminal_event_type: str,
    terminal_event_cls: type[Any],
) -> None:
    """A failed/incomplete terminal event must abort the stream with ModelBehaviorError.

    The terminal event itself is still forwarded to the consumer before the
    error is raised, so exactly one event is expected to have been seen.
    """

    async def response_stream() -> AsyncIterator[Any]:
        # Single terminal event indicating the response did not finish normally.
        yield terminal_event_cls(
            type=terminal_event_type,
            response=_response("partial", response_id="resp-terminal"),
            sequence_number=1,
        )

    fake_provider = FakeAnyLLMProvider(
        supports_responses=True, responses_response=response_stream()
    )
    module, _create_calls = _import_any_llm_module(monkeypatch, fake_provider)

    model = module.AnyLLMModel(model="openai/gpt-5.4-mini")
    seen: list[Any] = []
    with pytest.raises(ModelBehaviorError, match=terminal_event_type):
        stream = model.stream_response(
            system_instructions=None,
            input="hi",
            model_settings=ModelSettings(),
            tools=[],
            output_schema=None,
            handoffs=[],
            tracing=ModelTracing.DISABLED,
            previous_response_id=None,
            conversation_id=None,
            prompt=None,
        )
        async for event in stream:
            seen.append(event)

    # The terminal event was passed through before the stream aborted.
    assert len(seen) == 1
    assert seen[0].type == terminal_event_type
    assert seen[0].response.id == "resp-terminal"
| 587 | + |
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_any_llm_responses_stream_rejects_error_event(monkeypatch) -> None:
    """An ``error`` stream event must surface as a ModelBehaviorError.

    The error event is forwarded to the consumer first, then the stream raises
    with the provider's error code in the message.
    """

    async def response_stream() -> AsyncIterator[ResponseErrorEvent]:
        # Single provider-side error event.
        yield ResponseErrorEvent(
            type="error",
            code="invalid_request_error",
            message="bad request",
            param=None,
            sequence_number=1,
        )

    fake_provider = FakeAnyLLMProvider(
        supports_responses=True, responses_response=response_stream()
    )
    module, _create_calls = _import_any_llm_module(monkeypatch, fake_provider)

    model = module.AnyLLMModel(model="openai/gpt-5.4-mini")
    seen: list[Any] = []
    with pytest.raises(ModelBehaviorError, match="invalid_request_error"):
        stream = model.stream_response(
            system_instructions=None,
            input="hi",
            model_settings=ModelSettings(),
            tools=[],
            output_schema=None,
            handoffs=[],
            tracing=ModelTracing.DISABLED,
            previous_response_id=None,
            conversation_id=None,
            prompt=None,
        )
        async for event in stream:
            seen.append(event)

    # The error event was passed through before the stream aborted.
    assert len(seen) == 1
    assert seen[0].type == "error"
    assert seen[0].code == "invalid_request_error"
| 625 | + |
537 | 626 | @pytest.mark.allow_call_model_methods |
538 | 627 | @pytest.mark.asyncio |
539 | 628 | async def test_any_llm_responses_path_passes_transport_kwargs_via_private_provider_api( |
|
0 commit comments