@@ -394,6 +394,19 @@ def test_format_request(model, messages, tool_specs, system_prompt):
394394 {"chunk_type" : "content_delta" , "data_type" : "reasoning_content" , "data" : "I'm thinking" },
395395 {"contentBlockDelta" : {"delta" : {"reasoningContent" : {"text" : "I'm thinking" }}}},
396396 ),
397+ # Content Delta - Citation
398+ (
399+ {
400+ "chunk_type" : "content_delta" ,
401+ "data_type" : "citation" ,
402+ "data" : {"type" : "url_citation" , "title" : "Example" , "url" : "https://example.com" },
403+ },
404+ {
405+ "contentBlockDelta" : {
406+ "delta" : {"citation" : {"title" : "Example" , "location" : {"web" : {"url" : "https://example.com" }}}}
407+ }
408+ },
409+ ),
397410 # Content Delta - Text
398411 (
399412 {"chunk_type" : "content_delta" , "data_type" : "text" , "data" : "hello" },
@@ -618,6 +631,74 @@ async def test_stream_reasoning_content(openai_client, model, agenerator, alist)
618631 assert len (content_stops ) == 2
619632
620633
@pytest.mark.asyncio
async def test_stream_citation_annotations(openai_client, model, agenerator, alist):
    """Verify url_citation annotations surface as citation contentBlockDelta events."""
    # Two plain text deltas followed by a url_citation annotation, then completion.
    text_delta_1 = unittest.mock.Mock(type="response.output_text.delta", delta="The answer is here. ")
    text_delta_2 = unittest.mock.Mock(type="response.output_text.delta", delta="(example.com)")
    annotation_added = unittest.mock.Mock(
        type="response.output_text.annotation.added",
        annotation={
            "type": "url_citation",
            "title": "Example Source",
            "url": "https://example.com/article",
        },
    )
    completed = unittest.mock.Mock(
        type="response.completed",
        response=unittest.mock.Mock(usage=unittest.mock.Mock(input_tokens=10, output_tokens=5, total_tokens=15)),
    )

    openai_client.responses.create = unittest.mock.AsyncMock(
        return_value=agenerator([text_delta_1, text_delta_2, annotation_added, completed])
    )

    events = await alist(model.stream([{"role": "user", "content": [{"text": "search something"}]}]))

    # Exactly one citation delta should be emitted, mapping title/url into the
    # CitationsDelta shape with a web location.
    citation_deltas = [
        event
        for event in events
        if "contentBlockDelta" in event and "citation" in event["contentBlockDelta"]["delta"]
    ]
    expected_delta = {
        "contentBlockDelta": {
            "delta": {
                "citation": {
                    "title": "Example Source",
                    "location": {"web": {"url": "https://example.com/article"}},
                }
            }
        }
    }
    assert citation_deltas == [expected_delta]
673+
674+
@pytest.mark.asyncio
async def test_stream_unsupported_annotation_type(openai_client, model, agenerator, alist, caplog):
    """Verify unsupported annotation types are dropped with a warning rather than emitted."""
    # A file_citation annotation is not mapped to a citation delta by the model.
    text_delta = unittest.mock.Mock(type="response.output_text.delta", delta="Some text")
    annotation_added = unittest.mock.Mock(
        type="response.output_text.annotation.added",
        annotation={"type": "file_citation", "file_id": "file-123", "filename": "doc.pdf"},
    )
    completed = unittest.mock.Mock(
        type="response.completed",
        response=unittest.mock.Mock(usage=unittest.mock.Mock(input_tokens=10, output_tokens=5, total_tokens=15)),
    )

    openai_client.responses.create = unittest.mock.AsyncMock(
        return_value=agenerator([text_delta, annotation_added, completed])
    )

    events = await alist(model.stream([{"role": "user", "content": [{"text": "search files"}]}]))

    # No citation deltas should appear; the model logs a warning instead.
    citation_deltas = [
        event
        for event in events
        if "contentBlockDelta" in event and "citation" in event["contentBlockDelta"]["delta"]
    ]
    assert not citation_deltas
    assert "annotation_type=<file_citation> | unsupported annotation type" in caplog.text
701+
621702@pytest .mark .asyncio
622703async def test_structured_output (openai_client , model , test_output_model_cls , alist ):
623704 messages = [{"role" : "user" , "content" : [{"text" : "Generate a person" }]}]
@@ -886,6 +967,71 @@ def test_format_request_with_tool_choice(model, messages, tool_specs):
886967 assert request ["tool_choice" ] == {"type" : "function" , "name" : "test_tool" }
887968
888969
def test_format_request_merges_builtin_tools_with_function_tools(messages, tool_specs):
    """Built-in tools supplied via params come first, followed by formatted function tools."""
    model = OpenAIResponsesModel(model_id="gpt-4o", params={"tools": [{"type": "web_search"}]})

    request = model._format_request(messages, tool_specs)

    # The function tool formatted from the `tool_specs` fixture.
    function_tool = {
        "type": "function",
        "name": "test_tool",
        "description": "A test tool",
        "parameters": {
            "type": "object",
            "properties": {"input": {"type": "string"}},
            "required": ["input"],
        },
    }
    assert request["tools"] == [{"type": "web_search"}, function_tool]
992+
def test_format_request_builtin_tools_without_function_tools(messages):
    """Built-in tools from params survive when no function tool specs are provided."""
    model = OpenAIResponsesModel(model_id="gpt-4o", params={"tools": [{"type": "web_search"}]})

    request = model._format_request(messages)

    assert request["tools"] == [{"type": "web_search"}]
1002+
1003+
def test_format_request_messages_with_citations_content():
    """citationsContent blocks collapse to plain output_text in the formatted request."""
    citations_block = {
        "citationsContent": {
            "citations": [
                {
                    "title": "Example",
                    "location": {"web": {"url": "https://example.com", "domain": "example.com"}},
                    "sourceContent": [{"text": "cited text"}],
                }
            ],
            "content": [{"text": "The answer with citations."}],
        }
    }
    messages = [
        {"role": "user", "content": [{"text": "search something"}]},
        {"role": "assistant", "content": [citations_block]},
    ]

    formatted = OpenAIResponsesModel._format_request_messages(messages)

    # Only the inner text of the citations block survives; citation metadata is dropped.
    assistant_messages = [entry for entry in formatted if entry.get("role") == "assistant"]
    assert assistant_messages[0] == {
        "role": "assistant",
        "content": [{"type": "output_text", "text": "The answer with citations."}],
    }
1034+
8891035def test_format_request_message_content_image_size_limit ():
8901036 """Test that oversized images raise ValueError."""
8911037 oversized_data = b"x" * (_MAX_MEDIA_SIZE_BYTES + 1 )
0 commit comments