Skip to content

Commit 7ee6918

Browse files
test(openai): Replace mocks with httpx types for streaming Responses (#5882)
Replace mocks with `httpx` types to avoid test failures when library internals change.
1 parent: 9a44b92 · commit: 7ee6918

File tree

1 file changed: +129 additions, −79 deletions

tests/integrations/openai/test_openai.py

Lines changed: 129 additions & 79 deletions
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@
4141
SKIP_RESPONSES_TESTS = True
4242

4343
from sentry_sdk import start_transaction
44-
from sentry_sdk.consts import SPANDATA
44+
from sentry_sdk.consts import SPANDATA, OP
4545
from sentry_sdk.integrations.openai import (
4646
OpenAIIntegration,
4747
_calculate_token_usage,
@@ -2634,7 +2634,14 @@ async def test_ai_client_span_responses_async_api(
26342634
)
26352635
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
26362636
async def test_ai_client_span_streaming_responses_async_api(
2637-
sentry_init, capture_events, instructions, input, request, async_iterator
2637+
sentry_init,
2638+
capture_events,
2639+
instructions,
2640+
input,
2641+
request,
2642+
get_model_response,
2643+
async_iterator,
2644+
server_side_event_chunks,
26382645
):
26392646
sentry_init(
26402647
integrations=[OpenAIIntegration(include_prompts=True)],
@@ -2644,28 +2651,32 @@ async def test_ai_client_span_streaming_responses_async_api(
26442651
events = capture_events()
26452652

26462653
client = AsyncOpenAI(api_key="z")
2647-
returned_stream = AsyncStream(cast_to=None, response=None, client=client)
2648-
returned_stream._iterator = async_iterator(EXAMPLE_RESPONSES_STREAM)
2649-
client.responses._post = mock.AsyncMock(return_value=returned_stream)
2654+
returned_stream = get_model_response(
2655+
async_iterator(server_side_event_chunks(EXAMPLE_RESPONSES_STREAM))
2656+
)
26502657

2651-
with start_transaction(name="openai tx"):
2652-
result = await client.responses.create(
2653-
model="gpt-4o",
2654-
instructions=instructions,
2655-
input=input,
2656-
stream=True,
2657-
max_output_tokens=100,
2658-
temperature=0.7,
2659-
top_p=0.9,
2660-
)
2661-
async for _ in result:
2662-
pass
2658+
with mock.patch.object(
2659+
client.responses._client._client,
2660+
"send",
2661+
return_value=returned_stream,
2662+
):
2663+
with start_transaction(name="openai tx"):
2664+
result = await client.responses.create(
2665+
model="gpt-4o",
2666+
instructions=instructions,
2667+
input=input,
2668+
stream=True,
2669+
max_output_tokens=100,
2670+
temperature=0.7,
2671+
top_p=0.9,
2672+
)
2673+
async for _ in result:
2674+
pass
26632675

26642676
(transaction,) = events
2665-
spans = transaction["spans"]
2677+
spans = [span for span in transaction["spans"] if span["op"] == OP.GEN_AI_RESPONSES]
26662678

26672679
assert len(spans) == 1
2668-
assert spans[0]["op"] == "gen_ai.responses"
26692680
assert spans[0]["origin"] == "auto.ai.openai"
26702681

26712682
expected_data = {
@@ -2962,7 +2973,12 @@ async def test_error_in_responses_async_api(sentry_init, capture_events):
29622973
)
29632974
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
29642975
def test_streaming_responses_api(
2965-
sentry_init, capture_events, send_default_pii, include_prompts
2976+
sentry_init,
2977+
capture_events,
2978+
send_default_pii,
2979+
include_prompts,
2980+
get_model_response,
2981+
server_side_event_chunks,
29662982
):
29672983
sentry_init(
29682984
integrations=[
@@ -2976,24 +2992,31 @@ def test_streaming_responses_api(
29762992
events = capture_events()
29772993

29782994
client = OpenAI(api_key="z")
2979-
returned_stream = Stream(cast_to=None, response=None, client=client)
2980-
returned_stream._iterator = EXAMPLE_RESPONSES_STREAM
2981-
client.responses._post = mock.Mock(return_value=returned_stream)
2982-
2983-
with start_transaction(name="openai tx"):
2984-
response_stream = client.responses.create(
2985-
model="some-model",
2986-
input="hello",
2987-
stream=True,
2988-
max_output_tokens=100,
2989-
temperature=0.7,
2990-
top_p=0.9,
2995+
returned_stream = get_model_response(
2996+
server_side_event_chunks(
2997+
EXAMPLE_RESPONSES_STREAM,
29912998
)
2999+
)
29923000

2993-
response_string = ""
2994-
for item in response_stream:
2995-
if hasattr(item, "delta"):
2996-
response_string += item.delta
3001+
with mock.patch.object(
3002+
client.responses._client._client,
3003+
"send",
3004+
return_value=returned_stream,
3005+
):
3006+
with start_transaction(name="openai tx"):
3007+
response_stream = client.responses.create(
3008+
model="some-model",
3009+
input="hello",
3010+
stream=True,
3011+
max_output_tokens=100,
3012+
temperature=0.7,
3013+
top_p=0.9,
3014+
)
3015+
3016+
response_string = ""
3017+
for item in response_stream:
3018+
if hasattr(item, "delta"):
3019+
response_string += item.delta
29973020

29983021
assert response_string == "hello world"
29993022

@@ -3026,7 +3049,13 @@ def test_streaming_responses_api(
30263049
)
30273050
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
30283051
async def test_streaming_responses_api_async(
3029-
sentry_init, capture_events, send_default_pii, include_prompts, async_iterator
3052+
sentry_init,
3053+
capture_events,
3054+
send_default_pii,
3055+
include_prompts,
3056+
get_model_response,
3057+
async_iterator,
3058+
server_side_event_chunks,
30303059
):
30313060
sentry_init(
30323061
integrations=[
@@ -3040,24 +3069,29 @@ async def test_streaming_responses_api_async(
30403069
events = capture_events()
30413070

30423071
client = AsyncOpenAI(api_key="z")
3043-
returned_stream = AsyncStream(cast_to=None, response=None, client=client)
3044-
returned_stream._iterator = async_iterator(EXAMPLE_RESPONSES_STREAM)
3045-
client.responses._post = AsyncMock(return_value=returned_stream)
3072+
returned_stream = get_model_response(
3073+
async_iterator(server_side_event_chunks(EXAMPLE_RESPONSES_STREAM))
3074+
)
30463075

3047-
with start_transaction(name="openai tx"):
3048-
response_stream = await client.responses.create(
3049-
model="some-model",
3050-
input="hello",
3051-
stream=True,
3052-
max_output_tokens=100,
3053-
temperature=0.7,
3054-
top_p=0.9,
3055-
)
3076+
with mock.patch.object(
3077+
client.responses._client._client,
3078+
"send",
3079+
return_value=returned_stream,
3080+
):
3081+
with start_transaction(name="openai tx"):
3082+
response_stream = await client.responses.create(
3083+
model="some-model",
3084+
input="hello",
3085+
stream=True,
3086+
max_output_tokens=100,
3087+
temperature=0.7,
3088+
top_p=0.9,
3089+
)
30563090

3057-
response_string = ""
3058-
async for item in response_stream:
3059-
if hasattr(item, "delta"):
3060-
response_string += item.delta
3091+
response_string = ""
3092+
async for item in response_stream:
3093+
if hasattr(item, "delta"):
3094+
response_string += item.delta
30613095

30623096
assert response_string == "hello world"
30633097

@@ -3365,7 +3399,9 @@ async def test_streaming_chat_completion_ttft_async(
33653399

33663400
# noinspection PyTypeChecker
33673401
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
3368-
def test_streaming_responses_api_ttft(sentry_init, capture_events):
3402+
def test_streaming_responses_api_ttft(
3403+
sentry_init, capture_events, get_model_response, server_side_event_chunks
3404+
):
33693405
"""
33703406
Test that streaming responses API captures time-to-first-token (TTFT).
33713407
"""
@@ -3376,19 +3412,24 @@ def test_streaming_responses_api_ttft(sentry_init, capture_events):
33763412
events = capture_events()
33773413

33783414
client = OpenAI(api_key="z")
3379-
returned_stream = Stream(cast_to=None, response=None, client=client)
3380-
returned_stream._iterator = EXAMPLE_RESPONSES_STREAM
3381-
client.responses._post = mock.Mock(return_value=returned_stream)
3415+
returned_stream = get_model_response(
3416+
server_side_event_chunks(EXAMPLE_RESPONSES_STREAM)
3417+
)
33823418

3383-
with start_transaction(name="openai tx"):
3384-
response_stream = client.responses.create(
3385-
model="some-model",
3386-
input="hello",
3387-
stream=True,
3388-
)
3389-
# Consume the stream
3390-
for _ in response_stream:
3391-
pass
3419+
with mock.patch.object(
3420+
client.responses._client._client,
3421+
"send",
3422+
return_value=returned_stream,
3423+
):
3424+
with start_transaction(name="openai tx"):
3425+
response_stream = client.responses.create(
3426+
model="some-model",
3427+
input="hello",
3428+
stream=True,
3429+
)
3430+
# Consume the stream
3431+
for _ in response_stream:
3432+
pass
33923433

33933434
(tx,) = events
33943435
span = tx["spans"][0]
@@ -3405,7 +3446,11 @@ def test_streaming_responses_api_ttft(sentry_init, capture_events):
34053446
@pytest.mark.asyncio
34063447
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
34073448
async def test_streaming_responses_api_ttft_async(
3408-
sentry_init, capture_events, async_iterator
3449+
sentry_init,
3450+
capture_events,
3451+
get_model_response,
3452+
async_iterator,
3453+
server_side_event_chunks,
34093454
):
34103455
"""
34113456
Test that async streaming responses API captures time-to-first-token (TTFT).
@@ -3417,19 +3462,24 @@ async def test_streaming_responses_api_ttft_async(
34173462
events = capture_events()
34183463

34193464
client = AsyncOpenAI(api_key="z")
3420-
returned_stream = AsyncStream(cast_to=None, response=None, client=client)
3421-
returned_stream._iterator = async_iterator(EXAMPLE_RESPONSES_STREAM)
3422-
client.responses._post = AsyncMock(return_value=returned_stream)
3465+
returned_stream = get_model_response(
3466+
async_iterator(server_side_event_chunks(EXAMPLE_RESPONSES_STREAM))
3467+
)
34233468

3424-
with start_transaction(name="openai tx"):
3425-
response_stream = await client.responses.create(
3426-
model="some-model",
3427-
input="hello",
3428-
stream=True,
3429-
)
3430-
# Consume the stream
3431-
async for _ in response_stream:
3432-
pass
3469+
with mock.patch.object(
3470+
client.responses._client._client,
3471+
"send",
3472+
return_value=returned_stream,
3473+
):
3474+
with start_transaction(name="openai tx"):
3475+
response_stream = await client.responses.create(
3476+
model="some-model",
3477+
input="hello",
3478+
stream=True,
3479+
)
3480+
# Consume the stream
3481+
async for _ in response_stream:
3482+
pass
34333483

34343484
(tx,) = events
34353485
span = tx["spans"][0]

0 commit comments

Comments (0)