Skip to content

Commit e146224

Browse files
ericapisani and claude committed
ref(anthropic): Simplify finish_reasons to single finish_reason internally
Anthropic only returns a single stop_reason, so track it as a string internally instead of a list. The list wrapping is deferred to _set_output_data where it's set on the span. Also removes an unnecessary getattr guard for event.delta.stop_reason. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 9998646 commit e146224

File tree

2 files changed

+22
-27
lines changed

2 files changed

+22
-27
lines changed

sentry_sdk/integrations/anthropic.py

Lines changed: 19 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -159,8 +159,8 @@ def _collect_ai_data(
159159
usage: "_RecordedUsage",
160160
content_blocks: "list[str]",
161161
response_id: "str | None" = None,
162-
finish_reasons: "list[str] | None" = None,
163-
) -> "tuple[str | None, _RecordedUsage, list[str], str | None, list[str] | None]":
162+
finish_reason: "str | None" = None,
163+
) -> "tuple[str | None, _RecordedUsage, list[str], str | None, str | None]":
164164
"""
165165
Collect model information, token usage, and collect content blocks from the AI streaming response.
166166
"""
@@ -198,7 +198,7 @@ def _collect_ai_data(
198198
usage,
199199
content_blocks,
200200
response_id,
201-
finish_reasons,
201+
finish_reason,
202202
)
203203

204204
# Counterintuitive, but message_delta contains cumulative token counts :)
@@ -223,18 +223,17 @@ def _collect_ai_data(
223223
usage.cache_read_input_tokens = cache_read_input_tokens
224224
# TODO: Record event.usage.server_tool_use
225225

226-
stop_reason = getattr(event.delta, "stop_reason", None)
227-
if stop_reason is not None:
228-
finish_reasons = [stop_reason]
226+
if event.delta.stop_reason is not None:
227+
finish_reason = event.delta.stop_reason
229228

230-
return (model, usage, content_blocks, response_id, finish_reasons)
229+
return (model, usage, content_blocks, response_id, finish_reason)
231230

232231
return (
233232
model,
234233
usage,
235234
content_blocks,
236235
response_id,
237-
finish_reasons,
236+
finish_reason,
238237
)
239238

240239

@@ -413,7 +412,7 @@ def _wrap_synchronous_message_iterator(
413412
usage = _RecordedUsage()
414413
content_blocks: "list[str]" = []
415414
response_id = None
416-
finish_reasons = None
415+
finish_reason = None
417416

418417
try:
419418
for event in iterator:
@@ -433,14 +432,14 @@ def _wrap_synchronous_message_iterator(
433432
yield event
434433
continue
435434

436-
(model, usage, content_blocks, response_id, finish_reasons) = (
435+
(model, usage, content_blocks, response_id, finish_reason) = (
437436
_collect_ai_data(
438437
event,
439438
model,
440439
usage,
441440
content_blocks,
442441
response_id,
443-
finish_reasons,
442+
finish_reason,
444443
)
445444
)
446445
yield event
@@ -465,7 +464,7 @@ def _wrap_synchronous_message_iterator(
465464
content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
466465
finish_span=True,
467466
response_id=response_id,
468-
finish_reasons=finish_reasons,
467+
finish_reason=finish_reason,
469468
)
470469

471470

@@ -482,7 +481,7 @@ async def _wrap_asynchronous_message_iterator(
482481
usage = _RecordedUsage()
483482
content_blocks: "list[str]" = []
484483
response_id = None
485-
finish_reasons = None
484+
finish_reason = None
486485

487486
try:
488487
async for event in iterator:
@@ -507,14 +506,14 @@ async def _wrap_asynchronous_message_iterator(
507506
usage,
508507
content_blocks,
509508
response_id,
510-
finish_reasons,
509+
finish_reason,
511510
) = _collect_ai_data(
512511
event,
513512
model,
514513
usage,
515514
content_blocks,
516515
response_id,
517-
finish_reasons,
516+
finish_reason,
518517
)
519518
yield event
520519
finally:
@@ -538,7 +537,7 @@ async def _wrap_asynchronous_message_iterator(
538537
content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
539538
finish_span=True,
540539
response_id=response_id,
541-
finish_reasons=finish_reasons,
540+
finish_reason=finish_reason,
542541
)
543542

544543

@@ -553,15 +552,15 @@ def _set_output_data(
553552
content_blocks: "list[Any]",
554553
finish_span: bool = False,
555554
response_id: "str | None" = None,
556-
finish_reasons: "list[str] | None" = None,
555+
finish_reason: "str | None" = None,
557556
) -> None:
558557
"""
559558
Set output data for the span based on the AI response."""
560559
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
561560
if response_id is not None:
562561
span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response_id)
563-
if finish_reasons is not None:
564-
span.set_data(SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons)
562+
if finish_reason is not None:
563+
span.set_data(SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, [finish_reason])
565564
if should_send_default_pii() and integration.include_prompts:
566565
output_messages: "dict[str, list[Any]]" = {
567566
"response": [],
@@ -655,10 +654,6 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A
655654
elif hasattr(content_block, "text"):
656655
content_blocks.append({"type": "text", "text": content_block.text})
657656

658-
finish_reasons = None
659-
if getattr(result, "stop_reason", None) is not None:
660-
finish_reasons = [getattr(result, "stop_reason")]
661-
662657
_set_output_data(
663658
span=span,
664659
integration=integration,
@@ -670,7 +665,7 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A
670665
content_blocks=content_blocks,
671666
finish_span=True,
672667
response_id=getattr(result, "id", None),
673-
finish_reasons=finish_reasons,
668+
finish_reason=getattr(result, "stop_reason", None),
674669
)
675670
else:
676671
span.set_data("unknown_response", True)

tests/integrations/anthropic/test_anthropic.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1477,15 +1477,15 @@ def test_collect_ai_data_with_input_json_delta():
14771477

14781478
content_blocks = []
14791479

1480-
model, new_usage, new_content_blocks, response_id, finish_reasons = (
1481-
_collect_ai_data(event, model, usage, content_blocks)
1480+
model, new_usage, new_content_blocks, response_id, finish_reason = _collect_ai_data(
1481+
event, model, usage, content_blocks
14821482
)
14831483
assert model is None
14841484
assert new_usage.input_tokens == usage.input_tokens
14851485
assert new_usage.output_tokens == usage.output_tokens
14861486
assert new_content_blocks == ["test"]
14871487
assert response_id is None
1488-
assert finish_reasons is None
1488+
assert finish_reason is None
14891489

14901490

14911491
@pytest.mark.skipif(

0 commit comments

Comments (0)