@@ -217,7 +217,8 @@ def _collect_ai_data(
217217 usage : "_RecordedUsage" ,
218218 content_blocks : "list[str]" ,
219219 response_id : "str | None" = None ,
220- ) -> "tuple[str | None, _RecordedUsage, list[str], str | None]" :
220+ finish_reason : "str | None" = None ,
221+ ) -> "tuple[str | None, _RecordedUsage, list[str], str | None, str | None]" :
221222 """
222223 Collect model information, token usage, and collect content blocks from the AI streaming response.
223224 """
@@ -255,6 +256,7 @@ def _collect_ai_data(
255256 usage ,
256257 content_blocks ,
257258 response_id ,
259+ finish_reason ,
258260 )
259261
260262 # Counterintuitive, but message_delta contains cumulative token counts :)
@@ -279,18 +281,17 @@ def _collect_ai_data(
279281 usage .cache_read_input_tokens = cache_read_input_tokens
280282 # TODO: Record event.usage.server_tool_use
281283
282- return (
283- model ,
284- usage ,
285- content_blocks ,
286- response_id ,
287- )
284+ if event .delta .stop_reason is not None :
285+ finish_reason = event .delta .stop_reason
286+
287+ return (model , usage , content_blocks , response_id , finish_reason )
288288
289289 return (
290290 model ,
291291 usage ,
292292 content_blocks ,
293293 response_id ,
294+ finish_reason ,
294295 )
295296
296297
@@ -500,6 +501,7 @@ def _wrap_synchronous_message_iterator(
500501 stream ._usage ,
501502 stream ._content_blocks ,
502503 stream ._response_id ,
504+ stream ._finish_reason ,
503505 )
504506 stream ._span .__exit__ (* exc_info )
505507 del stream ._span
@@ -550,6 +552,7 @@ async def _wrap_asynchronous_message_iterator(
550552 stream ._usage ,
551553 stream ._content_blocks ,
552554 stream ._response_id ,
555+ stream ._finish_reason ,
553556 )
554557 stream ._span .__exit__ (* exc_info )
555558 del stream ._span
@@ -565,12 +568,15 @@ def _set_output_data(
565568 cache_write_input_tokens : "int | None" ,
566569 content_blocks : "list[Any]" ,
567570 response_id : "str | None" = None ,
571+ finish_reason : "str | None" = None ,
568572) -> None :
569573 """
570574 Set output data for the span based on the AI response."""
571575 span .set_data (SPANDATA .GEN_AI_RESPONSE_MODEL , model )
572576 if response_id is not None :
573577 span .set_data (SPANDATA .GEN_AI_RESPONSE_ID , response_id )
578+ if finish_reason is not None :
579+ span .set_data (SPANDATA .GEN_AI_RESPONSE_FINISH_REASONS , [finish_reason ])
574580 if should_send_default_pii () and integration .include_prompts :
575581 output_messages : "dict[str, list[Any]]" = {
576582 "response" : [],
@@ -664,6 +670,7 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A
664670 cache_write_input_tokens = cache_write_input_tokens ,
665671 content_blocks = content_blocks ,
666672 response_id = getattr (result , "id" , None ),
673+ finish_reason = getattr (result , "stop_reason" , None ),
667674 )
668675 span .__exit__ (None , None , None )
669676 else :
@@ -720,6 +727,7 @@ def _initialize_data_accumulation_state(stream: "Union[Stream, MessageStream]")
720727 stream ._usage = _RecordedUsage ()
721728 stream ._content_blocks = []
722729 stream ._response_id = None
730+ stream ._finish_reason = None
723731
724732
725733def _accumulate_event_data (
@@ -729,18 +737,20 @@ def _accumulate_event_data(
729737 """
730738 Update accumulated output from a single stream event.
731739 """
732- (model , usage , content_blocks , response_id ) = _collect_ai_data (
740+ (model , usage , content_blocks , response_id , finish_reason ) = _collect_ai_data (
733741 event ,
734742 stream ._model ,
735743 stream ._usage ,
736744 stream ._content_blocks ,
737745 stream ._response_id ,
746+ stream ._finish_reason ,
738747 )
739748
740749 stream ._model = model
741750 stream ._usage = usage
742751 stream ._content_blocks = content_blocks
743752 stream ._response_id = response_id
753+ stream ._finish_reason = finish_reason
744754
745755
746756def _set_streaming_output_data (
@@ -750,6 +760,7 @@ def _set_streaming_output_data(
750760 usage : "_RecordedUsage" ,
751761 content_blocks : "list[str]" ,
752762 response_id : "Optional[str]" ,
763+ finish_reason : "Optional[str]" ,
753764) -> None :
754765 """
755766 Set output attributes on the AI Client Span.
@@ -772,6 +783,7 @@ def _set_streaming_output_data(
772783 cache_write_input_tokens = usage .cache_write_input_tokens ,
773784 content_blocks = [{"text" : "" .join (content_blocks ), "type" : "text" }],
774785 response_id = response_id ,
786+ finish_reason = finish_reason ,
775787 )
776788
777789
@@ -821,6 +833,7 @@ def __next__(self: "Stream") -> "RawMessageStreamEvent":
821833 self ._usage ,
822834 self ._content_blocks ,
823835 self ._response_id ,
836+ self ._finish_reason ,
824837 )
825838 self ._span .__exit__ (None , None , None )
826839 del self ._span
@@ -854,6 +867,7 @@ def close(self: "Stream") -> None:
854867 self ._usage ,
855868 self ._content_blocks ,
856869 self ._response_id ,
870+ self ._finish_reason ,
857871 )
858872 self ._span .__exit__ (None , None , None )
859873 del self ._span
@@ -949,6 +963,7 @@ async def __anext__(self: "AsyncStream") -> "RawMessageStreamEvent":
949963 self ._usage ,
950964 self ._content_blocks ,
951965 self ._response_id ,
966+ self ._finish_reason ,
952967 )
953968 self ._span .__exit__ (None , None , None )
954969 del self ._span
@@ -982,6 +997,7 @@ async def close(self: "Stream") -> None:
982997 self ._usage ,
983998 self ._content_blocks ,
984999 self ._response_id ,
1000+ self ._finish_reason ,
9851001 )
9861002 self ._span .__exit__ (None , None , None )
9871003 del self ._span
@@ -1114,6 +1130,7 @@ def __next__(self: "MessageStream") -> "MessageStreamEvent":
11141130 self ._usage ,
11151131 self ._content_blocks ,
11161132 self ._response_id ,
1133+ self ._finish_reason ,
11171134 )
11181135 self ._span .__exit__ (None , None , None )
11191136 del self ._span
@@ -1147,6 +1164,7 @@ def close(self: "MessageStream") -> None:
11471164 self ._usage ,
11481165 self ._content_blocks ,
11491166 self ._response_id ,
1167+ self ._finish_reason ,
11501168 )
11511169 self ._span .__exit__ (None , None , None )
11521170 del self ._span
@@ -1287,6 +1305,7 @@ async def __anext__(self: "AsyncMessageStream") -> "MessageStreamEvent":
12871305 self ._usage ,
12881306 self ._content_blocks ,
12891307 self ._response_id ,
1308+ self ._finish_reason ,
12901309 )
12911310 self ._span .__exit__ (None , None , None )
12921311 del self ._span
@@ -1320,6 +1339,7 @@ async def close(self: "AsyncMessageStream") -> None:
13201339 self ._usage ,
13211340 self ._content_blocks ,
13221341 self ._response_id ,
1342+ self ._finish_reason ,
13231343 )
13241344 self ._span .__exit__ (None , None , None )
13251345 del self ._span