Skip to content

Commit d1ae0b2

Browse files
ericapisani authored and claude committed
refactor(openai): Use keyword arguments for _calculate_responses_token_usage call sites
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 04c6ede commit d1ae0b2

File tree

1 file changed

+25
-21
lines changed

1 file changed

+25
-21
lines changed

sentry_sdk/integrations/openai.py

Lines changed: 25 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -604,7 +604,11 @@ def _set_common_output_data(
604604
)
605605

606606
_calculate_responses_token_usage(
607-
input, response, span, None, integration.count_tokens
607+
input=input,
608+
response=response,
609+
span=span,
610+
streaming_message_responses=None,
611+
count_tokens=integration.count_tokens,
608612
)
609613

610614
if finish_span:
@@ -871,11 +875,11 @@ def _wrap_synchronous_responses_event_iterator(
871875
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, x.response.model)
872876

873877
_calculate_responses_token_usage(
874-
input,
875-
x.response,
876-
span,
877-
None,
878-
integration.count_tokens,
878+
input=input,
879+
response=x.response,
880+
span=span,
881+
streaming_message_responses=None,
882+
count_tokens=integration.count_tokens,
879883
)
880884
count_tokens_manually = False
881885

@@ -892,11 +896,11 @@ def _wrap_synchronous_responses_event_iterator(
892896
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses)
893897
if count_tokens_manually:
894898
_calculate_responses_token_usage(
895-
input,
896-
response,
897-
span,
898-
all_responses,
899-
integration.count_tokens,
899+
input=input,
900+
response=response,
901+
span=span,
902+
streaming_message_responses=all_responses,
903+
count_tokens=integration.count_tokens,
900904
)
901905

902906
if finish_span:
@@ -934,11 +938,11 @@ async def _wrap_asynchronous_responses_event_iterator(
934938
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, x.response.model)
935939

936940
_calculate_responses_token_usage(
937-
input,
938-
x.response,
939-
span,
940-
None,
941-
integration.count_tokens,
941+
input=input,
942+
response=x.response,
943+
span=span,
944+
streaming_message_responses=None,
945+
count_tokens=integration.count_tokens,
942946
)
943947
count_tokens_manually = False
944948

@@ -955,11 +959,11 @@ async def _wrap_asynchronous_responses_event_iterator(
955959
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses)
956960
if count_tokens_manually:
957961
_calculate_responses_token_usage(
958-
input,
959-
response,
960-
span,
961-
all_responses,
962-
integration.count_tokens,
962+
input=input,
963+
response=response,
964+
span=span,
965+
streaming_message_responses=all_responses,
966+
count_tokens=integration.count_tokens,
963967
)
964968
if finish_span:
965969
span.__exit__(None, None, None)

0 commit comments

Comments (0)