@@ -83,8 +83,8 @@ class Tracer:
8383 When the OTEL_EXPORTER_OTLP_ENDPOINT environment variable is set, traces
8484 are sent to the OTLP endpoint.
8585
86- Both attributes are controlled by including "gen_ai_latest_experimental" or "gen_ai_tool_definitions",
87- respectively, in the OTEL_SEMCONV_STABILITY_OPT_IN environment variable.
86+ Both attributes are controlled by including "gen_ai_latest_experimental", "gen_ai_tool_definitions",
87+ or "gen_ai_use_latest_invocation_tokens", respectively, in the OTEL_SEMCONV_STABILITY_OPT_IN environment variable.
8888 """
8989
9090 def __init__ (self ) -> None :
@@ -100,6 +100,7 @@ def __init__(self) -> None:
100100 ## To-do: should not set below attributes directly, use env var instead
101101 self .use_latest_genai_conventions = "gen_ai_latest_experimental" in opt_in_values
102102 self ._include_tool_definitions = "gen_ai_tool_definitions" in opt_in_values
103+ self ._use_latest_invocation_tokens = "gen_ai_use_latest_invocation_tokens" in opt_in_values
103104
104105 def _parse_semconv_opt_in (self ) -> set [str ]:
105106 """Parse the OTEL_SEMCONV_STABILITY_OPT_IN environment variable.
@@ -690,16 +691,26 @@ def end_agent_span(
690691 if hasattr (response , "metrics" ) and hasattr (response .metrics , "accumulated_usage" ):
691692 if self .is_langfuse :
692693 attributes .update ({"langfuse.observation.type" : "span" })
693- accumulated_usage = response .metrics .accumulated_usage
694+ if self ._use_latest_invocation_tokens :
695+ latest_invocation = response .metrics .latest_agent_invocation
696+ if latest_invocation is None :
697+ logger .warning (
698+ "latest_agent_invocation is None despite _use_latest_invocation_tokens being set"
699+ )
700+ usage : Usage = Usage (inputTokens = 0 , outputTokens = 0 , totalTokens = 0 )
701+ else :
702+ usage = latest_invocation .usage
703+ else :
704+ usage = response .metrics .accumulated_usage
694705 attributes .update (
695706 {
696- "gen_ai.usage.prompt_tokens" : accumulated_usage ["inputTokens" ],
697- "gen_ai.usage.completion_tokens" : accumulated_usage ["outputTokens" ],
698- "gen_ai.usage.input_tokens" : accumulated_usage ["inputTokens" ],
699- "gen_ai.usage.output_tokens" : accumulated_usage ["outputTokens" ],
700- "gen_ai.usage.total_tokens" : accumulated_usage ["totalTokens" ],
701- "gen_ai.usage.cache_read_input_tokens" : accumulated_usage .get ("cacheReadInputTokens" , 0 ),
702- "gen_ai.usage.cache_write_input_tokens" : accumulated_usage .get ("cacheWriteInputTokens" , 0 ),
707+ "gen_ai.usage.prompt_tokens" : usage ["inputTokens" ],
708+ "gen_ai.usage.completion_tokens" : usage ["outputTokens" ],
709+ "gen_ai.usage.input_tokens" : usage ["inputTokens" ],
710+ "gen_ai.usage.output_tokens" : usage ["outputTokens" ],
711+ "gen_ai.usage.total_tokens" : usage ["totalTokens" ],
712+ "gen_ai.usage.cache_read_input_tokens" : usage .get ("cacheReadInputTokens" , 0 ),
713+ "gen_ai.usage.cache_write_input_tokens" : usage .get ("cacheWriteInputTokens" , 0 ),
703714 }
704715 )
705716
0 commit comments