Skip to content

Commit ce90618

Browse files
authored
Merge branch 'main' into otelbot/update-opentelemetry-util-genai-version-to-v0.5b0
2 parents caa3d5e + 7e0d131 commit ce90618

11 files changed

Lines changed: 74 additions & 9 deletions

File tree

CONTRIBUTING.md

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,13 @@ git commit
206206
git push fork feature
207207
```
208208

209-
Open a pull request against the main `opentelemetry-python-contrib` repo.
209+
Open a pull request (PR) against the main `opentelemetry-python-contrib` repo.
210+
211+
A descriptive PR title will help the community better triage and review your changes. Make sure to prefix with the name(s) of the package/subdirectory/domain that your PR updates. Following any of these examples will help:
212+
213+
* "opentelemetry-instrumentation-dbapi: add client operation duration metrics"
214+
* "GenAI Utils: Add _BaseAgent base class and agent creation lifecycle"
215+
* "docs(google-genai): document config recording environment variables"
210216

211217
### How to Receive Comments
212218

instrumentation-genai/opentelemetry-instrumentation-anthropic/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## Unreleased
99

10+
- Update `opentelemetry-util-genai` dependency range to `>= 0.4b0.dev, <0.5b0`
11+
([#4520](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4520))
1012
- Fix compatibility with wrapt 2.x by using positional arguments in `wrap_function_wrapper()` calls
1113
([#4445](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4445))
1214

instrumentation-genai/opentelemetry-instrumentation-anthropic/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ dependencies = [
2828
"opentelemetry-api ~= 1.39",
2929
"opentelemetry-instrumentation ~= 0.60b0",
3030
"opentelemetry-semantic-conventions ~= 0.60b0",
31-
"opentelemetry-util-genai >= 0.2b0, <0.4b0",
31+
"opentelemetry-util-genai >= 0.4b0.dev, <0.5b0",
3232
]
3333

3434
[project.optional-dependencies]

instrumentation-genai/opentelemetry-instrumentation-anthropic/tests/requirements.oldest.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
# This variant of the requirements aims to test the system using
1616
# the oldest supported version of external dependencies.
1717

18-
-e util/opentelemetry-util-genai
18+
-e util/opentelemetry-util-genai # todo: update to 0.4b0 when it's released
1919
anthropic==0.51.0
2020
pytest==7.4.4
2121
pytest-vcr==1.0.2

instrumentation-genai/opentelemetry-instrumentation-claude-agent-sdk/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,5 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## Unreleased
99

10+
- Update `opentelemetry-util-genai` dependency range to `>= 0.4b0.dev, <0.5b0`
11+
([#4520](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4520))
12+
1013
### Added
1114

instrumentation-genai/opentelemetry-instrumentation-claude-agent-sdk/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ dependencies = [
2727
"opentelemetry-api ~= 1.39",
2828
"opentelemetry-instrumentation ~= 0.60b0",
2929
"opentelemetry-semantic-conventions ~= 0.60b0",
30-
"opentelemetry-util-genai >= 0.2b0, <0.4b0",
30+
"opentelemetry-util-genai >= 0.2b0, <0.5b0",
3131
]
3232

3333
[project.optional-dependencies]

instrumentation-genai/opentelemetry-instrumentation-claude-agent-sdk/tests/requirements.oldest.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
# This variant of the requirements aims to test the system using
1616
# the oldest supported version of external dependencies.
1717

18-
-e util/opentelemetry-util-genai
1918
claude-agent-sdk==0.1.14
2019
pytest==7.4.4
2120
pytest-vcr==1.0.2
@@ -24,5 +23,6 @@ wrapt==1.16.0
2423
opentelemetry-api==1.39 # when updating, also update in pyproject.toml
2524
opentelemetry-sdk==1.39 # when updating, also update in pyproject.toml
2625
opentelemetry-semantic-conventions==0.60b0 # when updating, also update in pyproject.toml
26+
opentelemetry-util-genai==0.2b0 # when updating, also update in pyproject.toml
2727

2828
-e instrumentation-genai/opentelemetry-instrumentation-claude-agent-sdk

instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## Unreleased
99

10+
- Add `gen_ai.usage.reasoning.output_tokens` attribute to capture thinking tokens on spans/events when the experimental sem conv flag is set. Add thinking tokens to output tokens. ([#4313](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4313))
1011
- Add `gen_ai.usage.cache_read.input_tokens` attribute to capture cached tokens on spans/events when the experimental sem conv flag is set. ([#4313](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4313))
1112

1213
## Version 0.7b0 (2026-02-20)

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -525,6 +525,7 @@ def __init__(
525525
self._error_type = None
526526
self._input_tokens = 0
527527
self._cached_tokens = 0
528+
self._thinking_tokens = 0
528529
self._output_tokens = 0
529530
sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
530531
_OpenTelemetryStabilitySignalType.GEN_AI
@@ -633,12 +634,21 @@ def _maybe_update_token_counts(self, response: GenerateContentResponse):
633634
cached_tokens = _get_response_property(
634635
response, "usage_metadata.cached_content_token_count"
635636
)
637+
thinking_tokens = _get_response_property(
638+
response, "usage_metadata.thoughts_token_count"
639+
)
636640
if cached_tokens and isinstance(cached_tokens, int):
637641
self._cached_tokens = cached_tokens
638642
if input_tokens and isinstance(input_tokens, int):
639643
self._input_tokens = input_tokens
640644
if output_tokens and isinstance(output_tokens, int):
641645
self._output_tokens = output_tokens
646+
if thinking_tokens and isinstance(thinking_tokens, int):
647+
# Pricing of tokens is the sum of output tokens and thinking tokens:
648+
# https://ai.google.dev/gemini-api/docs/thinking#pricing
649+
# Also the sem conv recommends combining these counts.
650+
self._output_tokens += thinking_tokens
651+
self._thinking_tokens = thinking_tokens
642652

643653
def _maybe_update_error_type(self, response: GenerateContentResponse):
644654
if response.candidates:
@@ -778,6 +788,14 @@ def _maybe_log_completion_details(
778788
event.attributes[
779789
gen_ai_attributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS
780790
] = self._cached_tokens
791+
# TODO: replace these strings with the sem conv constant in `gen_ai_attributes` once it becomes available.
792+
span.set_attribute(
793+
"gen_ai.usage.reasoning.output_tokens",
794+
self._thinking_tokens,
795+
)
796+
event.attributes["gen_ai.usage.reasoning.output_tokens"] = (
797+
self._thinking_tokens
798+
)
781799
tool_definitions = tool_definitions or []
782800
self.completion_hook.on_completion(
783801
inputs=input_messages,

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py

Lines changed: 36 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -261,17 +261,25 @@ def test_generated_span_has_vertex_ai_system_when_configured(self):
261261

262262
def test_generated_span_counts_tokens(self):
263263
self.configure_valid_response(
264-
input_tokens=123, output_tokens=456, cached_tokens=50
264+
input_tokens=123,
265+
output_tokens=456,
266+
cached_tokens=50,
267+
thinking_tokens=17,
265268
)
266269
self.generate_content(model="gemini-2.0-flash", contents="Some input")
267270
self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
268271
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
269272
self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123)
270-
self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456)
273+
self.assertEqual(
274+
span.attributes["gen_ai.usage.output_tokens"], 456 + 17
275+
)
271276
# New sem conv should not appear when flag is not experimental mode..
272277
self.assertNotIn(
273278
"gen_ai.usage.cache_read.input_tokens", span.attributes
274279
)
280+
self.assertNotIn(
281+
"gen_ai.usage.reasoning.output_tokens", span.attributes
282+
)
275283

276284
@patch.dict(
277285
"os.environ",
@@ -452,7 +460,9 @@ def test_new_semconv_record_completion_as_log(self):
452460
self.setUp()
453461
with patched_environ, patched_otel_mapping:
454462
self.configure_valid_response(
455-
text=output, cached_tokens=50
463+
text=output,
464+
cached_tokens=50,
465+
thinking_tokens=17,
456466
)
457467
self.generate_content(
458468
model="gemini-2.0-flash",
@@ -475,6 +485,16 @@ def test_new_semconv_record_completion_as_log(self):
475485
],
476486
50,
477487
)
488+
self.assertEqual(
489+
event.attributes[
490+
"gen_ai.usage.reasoning.output_tokens"
491+
],
492+
17,
493+
)
494+
self.assertEqual(
495+
event.attributes["gen_ai.usage.output_tokens"],
496+
17,
497+
)
478498
assert (
479499
event.attributes[
480500
"gcp.gen_ai.operation.config.response_schema"
@@ -780,7 +800,9 @@ def test_new_semconv_record_completion_in_span(self):
780800
self.setUp()
781801
with patched_environ, patched_otel_mapping:
782802
self.configure_valid_response(
783-
text="Some response content", cached_tokens=50
803+
text="Some response content",
804+
cached_tokens=50,
805+
thinking_tokens=19,
784806
)
785807
self.generate_content(
786808
model="gemini-2.0-flash",
@@ -800,6 +822,16 @@ def test_new_semconv_record_completion_in_span(self):
800822
],
801823
50,
802824
)
825+
self.assertEqual(
826+
span.attributes[
827+
"gen_ai.usage.reasoning.output_tokens"
828+
],
829+
19,
830+
)
831+
self.assertEqual(
832+
span.attributes["gen_ai.usage.output_tokens"],
833+
19,
834+
)
803835
if mode in [
804836
ContentCapturingMode.SPAN_ONLY,
805837
ContentCapturingMode.SPAN_AND_EVENT,

0 commit comments

Comments
 (0)