@@ -167,6 +167,7 @@ def test_nonstreaming_chat_completion_no_prompts(
     span = tx["spans"][0]
     assert span["op"] == "gen_ai.chat"
     assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
 
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model"
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100
@@ -259,6 +260,7 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req
     span = tx["spans"][0]
     assert span["op"] == "gen_ai.chat"
     assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
 
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model"
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100
@@ -338,6 +340,7 @@ async def test_nonstreaming_chat_completion_async_no_prompts(
     span = tx["spans"][0]
     assert span["op"] == "gen_ai.chat"
     assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
 
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model"
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100
@@ -430,6 +433,7 @@ async def test_nonstreaming_chat_completion_async(
     span = tx["spans"][0]
     assert span["op"] == "gen_ai.chat"
     assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
 
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model"
     assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100
0 commit comments