@@ -126,7 +126,8 @@ def _collect_ai_data(
126126 model : "str | None" ,
127127 usage : "_RecordedUsage" ,
128128 content_blocks : "list[str]" ,
129- ) -> "tuple[str | None, _RecordedUsage, list[str]]" :
129+ response_id : "str | None" = None ,
130+ ) -> "tuple[str | None, _RecordedUsage, list[str], str | None]" :
130131 """
131132 Collect model information, token usage, and collect content blocks from the AI streaming response.
132133 """
@@ -146,6 +147,7 @@ def _collect_ai_data(
146147 # https://github.com/anthropics/anthropic-sdk-python/blob/9c485f6966e10ae0ea9eabb3a921d2ea8145a25b/src/anthropic/lib/streaming/_messages.py#L433-L518
147148 if event .type == "message_start" :
148149 model = event .message .model or model
150+ response_id = getattr (event .message , "id" , None ) or response_id
149151
150152 incoming_usage = event .message .usage
151153 usage .output_tokens = incoming_usage .output_tokens
@@ -162,6 +164,7 @@ def _collect_ai_data(
162164 model ,
163165 usage ,
164166 content_blocks ,
167+ response_id ,
165168 )
166169
167170 # Counterintuitive, but message_delta contains cumulative token counts :)
@@ -190,12 +193,14 @@ def _collect_ai_data(
190193 model ,
191194 usage ,
192195 content_blocks ,
196+ response_id ,
193197 )
194198
195199 return (
196200 model ,
197201 usage ,
198202 content_blocks ,
203+ response_id ,
199204 )
200205
201206
@@ -348,10 +353,13 @@ def _set_output_data(
348353 cache_write_input_tokens : "int | None" ,
349354 content_blocks : "list[Any]" ,
350355 finish_span : bool = False ,
356+ response_id : "str | None" = None ,
351357) -> None :
352358 """
353359 Set output data for the span based on the AI response."""
354360 span .set_data (SPANDATA .GEN_AI_RESPONSE_MODEL , model )
361+ if response_id is not None :
362+ span .set_data (SPANDATA .GEN_AI_RESPONSE_ID , response_id )
355363 if should_send_default_pii () and integration .include_prompts :
356364 output_messages : "dict[str, list[Any]]" = {
357365 "response" : [],
@@ -443,6 +451,7 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A
443451 cache_write_input_tokens = cache_write_input_tokens ,
444452 content_blocks = content_blocks ,
445453 finish_span = True ,
454+ response_id = getattr (result , "id" , None ),
446455 )
447456
448457 # Streaming response
@@ -453,17 +462,20 @@ def new_iterator() -> "Iterator[MessageStreamEvent]":
453462 model = None
454463 usage = _RecordedUsage ()
455464 content_blocks : "list[str]" = []
465+ response_id = None
456466
457467 for event in old_iterator :
458468 (
459469 model ,
460470 usage ,
461471 content_blocks ,
472+ response_id ,
462473 ) = _collect_ai_data (
463474 event ,
464475 model ,
465476 usage ,
466477 content_blocks ,
478+ response_id ,
467479 )
468480 yield event
469481
@@ -485,23 +497,27 @@ def new_iterator() -> "Iterator[MessageStreamEvent]":
485497 cache_write_input_tokens = usage .cache_write_input_tokens ,
486498 content_blocks = [{"text" : "" .join (content_blocks ), "type" : "text" }],
487499 finish_span = True ,
500+ response_id = response_id ,
488501 )
489502
490503 async def new_iterator_async () -> "AsyncIterator[MessageStreamEvent]" :
491504 model = None
492505 usage = _RecordedUsage ()
493506 content_blocks : "list[str]" = []
507+ response_id = None
494508
495509 async for event in old_iterator :
496510 (
497511 model ,
498512 usage ,
499513 content_blocks ,
514+ response_id ,
500515 ) = _collect_ai_data (
501516 event ,
502517 model ,
503518 usage ,
504519 content_blocks ,
520+ response_id ,
505521 )
506522 yield event
507523
@@ -523,6 +539,7 @@ async def new_iterator_async() -> "AsyncIterator[MessageStreamEvent]":
523539 cache_write_input_tokens = usage .cache_write_input_tokens ,
524540 content_blocks = [{"text" : "" .join (content_blocks ), "type" : "text" }],
525541 finish_span = True ,
542+ response_id = response_id ,
526543 )
527544
528545 if str (type (result ._iterator )) == "<class 'async_generator'>" :
# (scraper artifact removed: "0 commit comments" — not part of the diff)