Skip to content

Commit 23e777f

Browse files
committed
some fixes
1 parent c9abb3c commit 23e777f

File tree

11 files changed

+130
-47
lines changed

11 files changed

+130
-47
lines changed

pyproject.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,10 @@ ignore = [
178178

179179
[tool.ruff.lint.per-file-ignores]
180180
"docs/**/*.*" = ["A001"]
181+
# Deferred imports to break circular dependency: utils -> types -> inference_invocation -> utils
182+
"util/opentelemetry-util-genai/src/opentelemetry/util/genai/utils.py" = ["PLC0415"]
183+
# Bottom-of-file import of LLMInvocation for backward-compatibility re-export; cannot move to top
184+
"util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py" = ["E402"]
181185

182186
[tool.ruff.lint.isort]
183187
detect-same-package = false # to not consider instrumentation packages as first-party

tox.ini

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -789,6 +789,7 @@ deps =
789789

790790
util-genai: {[testenv]test_deps}
791791
util-genai: -r {toxinidir}/util/opentelemetry-util-genai/test-requirements.txt
792+
util-genai: {toxinidir}/util/opentelemetry-util-genai
792793

793794
; FIXME: add coverage testing
794795
allowlist_externals =

util/opentelemetry-util-genai/CHANGELOG.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
2020
- Check if upload works at startup in initializer of the `UploadCompletionHook`, instead
2121
of repeatedly failing on every upload ([#4390](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4390)).
2222
- Refactor public API: add factory methods (`start_inference`, `start_embedding`, `start_tool`, `start_workflow`) and invocation-owned lifecycle (`invocation.stop()` / `invocation.fail(exc)`); rename `LLMInvocation` → `InferenceInvocation` and `ToolCall` → `ToolInvocation`. Existing usages remain fully functional via deprecated aliases.
23-
([#TODO](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/TODO))
23+
([#4391](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4391))
2424

2525

2626
## Version 0.3b0 (2026-02-20)

util/opentelemetry-util-genai/src/opentelemetry/util/genai/embedding_invocation.py

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,7 @@
2020
gen_ai_attributes as GenAI,
2121
)
2222
from opentelemetry.semconv.attributes import server_attributes
23-
2423
from opentelemetry.trace import SpanKind
25-
2624
from opentelemetry.util.genai.types import Error, GenAIInvocation
2725

2826
if TYPE_CHECKING:
@@ -56,7 +54,9 @@ def __init__(
5654
metric_attributes: dict[str, Any] | None = None,
5755
) -> None:
5856
"""Use handler.start_embedding(provider) or handler.embedding(provider) instead of calling this directly."""
59-
super().__init__(handler, attributes=attributes, metric_attributes=metric_attributes)
57+
super().__init__(
58+
handler, attributes=attributes, metric_attributes=metric_attributes
59+
)
6060
self.provider = provider # e.g., azure.ai.openai, openai, aws.bedrock
6161
self.request_model = request_model
6262
self.server_address = server_address
@@ -67,7 +67,11 @@ def __init__(
6767
self.input_tokens = input_tokens
6868
self.dimension_count = dimension_count
6969
self.response_model_name = response_model_name
70-
self._span_name = f"{self.operation_name} {request_model}" if request_model else self.operation_name
70+
self._span_name = (
71+
f"{self.operation_name} {request_model}"
72+
if request_model
73+
else self.operation_name
74+
)
7175
self._span_kind = SpanKind.CLIENT
7276
handler._start(self)
7377

@@ -84,7 +88,11 @@ def _apply_finish(self, error: Error | None = None) -> None:
8488
)
8589
attributes: dict[str, Any] = {
8690
GenAI.GEN_AI_OPERATION_NAME: self.operation_name,
87-
**{key: value for key, value in optional_attrs if value is not None},
91+
**{
92+
key: value
93+
for key, value in optional_attrs
94+
if value is not None
95+
},
8896
}
8997
if error is not None:
9098
self._apply_error_attributes(error)

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@
4848

4949
from __future__ import annotations
5050

51-
import logging
5251
import timeit
5352
from contextlib import contextmanager
5453
from typing import Iterator, TypeVar
@@ -81,6 +80,7 @@
8180
from opentelemetry.util.genai.version import __version__
8281
from opentelemetry.util.genai.workflow_invocation import WorkflowInvocation
8382

83+
8484
def _safe_detach(invocation: GenAIInvocation) -> None:
8585
"""Detach the context token if still present, as a safety net."""
8686
if invocation._context_token is not None:

util/opentelemetry-util-genai/src/opentelemetry/util/genai/inference_invocation.py

Lines changed: 75 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -19,16 +19,14 @@
1919

2020
from typing_extensions import deprecated
2121

22+
from opentelemetry._logs import LogRecord
23+
from opentelemetry.context import get_current
2224
from opentelemetry.semconv._incubating.attributes import (
2325
gen_ai_attributes as GenAI,
2426
)
2527
from opentelemetry.semconv.attributes import server_attributes
26-
27-
from opentelemetry._logs import LogRecord
28-
from opentelemetry.context import get_current
29-
from opentelemetry.trace import SpanKind
28+
from opentelemetry.trace import INVALID_SPAN, SpanKind
3029
from opentelemetry.trace.propagation import set_span_in_context
31-
3230
from opentelemetry.util.genai.types import (
3331
ContentCapturingMode,
3432
Error,
@@ -86,12 +84,20 @@ def __init__(
8684
metric_attributes: dict[str, Any] | None = None,
8785
) -> None:
8886
"""Use handler.start_inference(provider) or handler.inference(provider) instead of calling this directly."""
89-
super().__init__(handler, attributes=attributes, metric_attributes=metric_attributes)
87+
super().__init__(
88+
handler, attributes=attributes, metric_attributes=metric_attributes
89+
)
9090
self.provider = provider
9191
self.request_model = request_model
92-
self.input_messages: list[InputMessage] = [] if input_messages is None else input_messages
93-
self.output_messages: list[OutputMessage] = [] if output_messages is None else output_messages
94-
self.system_instruction: list[MessagePart] = [] if system_instruction is None else system_instruction
92+
self.input_messages: list[InputMessage] = (
93+
[] if input_messages is None else input_messages
94+
)
95+
self.output_messages: list[OutputMessage] = (
96+
[] if output_messages is None else output_messages
97+
)
98+
self.system_instruction: list[MessagePart] = (
99+
[] if system_instruction is None else system_instruction
100+
)
95101
self.response_model_name = response_model_name
96102
self.response_id = response_id
97103
self.finish_reasons = finish_reasons
@@ -106,7 +112,11 @@ def __init__(
106112
self.seed = seed
107113
self.server_address = server_address
108114
self.server_port = server_port
109-
self._span_name = f"{self.operation_name} {request_model}" if request_model else self.operation_name
115+
self._span_name = (
116+
f"{self.operation_name} {request_model}"
117+
if request_model
118+
else self.operation_name
119+
)
110120
self._span_kind = SpanKind.CLIENT
111121
handler._start(self)
112122

@@ -115,9 +125,15 @@ def _get_message_attributes(self, *, for_span: bool) -> dict[str, Any]:
115125
return {}
116126
mode = get_content_capturing_mode()
117127
allowed_modes = (
118-
(ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT)
128+
(
129+
ContentCapturingMode.SPAN_ONLY,
130+
ContentCapturingMode.SPAN_AND_EVENT,
131+
)
119132
if for_span
120-
else (ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT)
133+
else (
134+
ContentCapturingMode.EVENT_ONLY,
135+
ContentCapturingMode.SPAN_AND_EVENT,
136+
)
121137
)
122138
if mode not in allowed_modes:
123139
return {}
@@ -127,17 +143,38 @@ def serialize(items: list[Any]) -> Any:
127143
return gen_ai_json_dumps(dicts) if for_span else dicts
128144

129145
optional_attrs = (
130-
(GenAI.GEN_AI_INPUT_MESSAGES, serialize(self.input_messages) if self.input_messages else None),
131-
(GenAI.GEN_AI_OUTPUT_MESSAGES, serialize(self.output_messages) if self.output_messages else None),
132-
(GenAI.GEN_AI_SYSTEM_INSTRUCTIONS, serialize(self.system_instruction) if self.system_instruction else None),
146+
(
147+
GenAI.GEN_AI_INPUT_MESSAGES,
148+
serialize(self.input_messages)
149+
if self.input_messages
150+
else None,
151+
),
152+
(
153+
GenAI.GEN_AI_OUTPUT_MESSAGES,
154+
serialize(self.output_messages)
155+
if self.output_messages
156+
else None,
157+
),
158+
(
159+
GenAI.GEN_AI_SYSTEM_INSTRUCTIONS,
160+
serialize(self.system_instruction)
161+
if self.system_instruction
162+
else None,
163+
),
133164
)
134-
return {key: value for key, value in optional_attrs if value is not None}
165+
return {
166+
key: value for key, value in optional_attrs if value is not None
167+
}
135168

136169
def _get_finish_reasons(self) -> list[str] | None:
137170
if self.finish_reasons is not None:
138171
return self.finish_reasons or None
139172
if self.output_messages:
140-
reasons = [msg.finish_reason for msg in self.output_messages if msg.finish_reason]
173+
reasons = [
174+
msg.finish_reason
175+
for msg in self.output_messages
176+
if msg.finish_reason
177+
]
141178
return reasons or None
142179
return None
143180

@@ -188,11 +225,13 @@ def _emit_event(self) -> None:
188225
attributes.update(self._get_message_attributes(for_span=False))
189226
attributes.update(self.attributes)
190227
context = set_span_in_context(self.span, get_current())
191-
self._handler._logger.emit(LogRecord(
192-
event_name="gen_ai.client.inference.operation.details",
193-
attributes=attributes,
194-
context=context,
195-
))
228+
self._handler._logger.emit(
229+
LogRecord(
230+
event_name="gen_ai.client.inference.operation.details",
231+
attributes=attributes,
232+
context=context,
233+
)
234+
)
196235

197236

198237
@deprecated("LLMInvocation is deprecated. Use InferenceInvocation instead.")
@@ -250,19 +289,24 @@ def __init__(
250289
)
251290
return
252291
# Old-style: data container pattern; span started later via handler.start_llm()
253-
from opentelemetry.trace import INVALID_SPAN, SpanKind
254292
self._handler = None
255293
self.attributes = {} if attributes is None else attributes
256-
self.metric_attributes = {} if metric_attributes is None else metric_attributes
294+
self.metric_attributes = (
295+
{} if metric_attributes is None else metric_attributes
296+
)
257297
self.span = INVALID_SPAN
258298
self._span_kind = SpanKind.CLIENT
259299
self._context_token = None
260300
self._monotonic_start_s = None
261301
self.provider = provider
262302
self.request_model = request_model
263303
self.input_messages = [] if input_messages is None else input_messages
264-
self.output_messages = [] if output_messages is None else output_messages
265-
self.system_instruction = [] if system_instruction is None else system_instruction
304+
self.output_messages = (
305+
[] if output_messages is None else output_messages
306+
)
307+
self.system_instruction = (
308+
[] if system_instruction is None else system_instruction
309+
)
266310
self.response_model_name = response_model_name
267311
self.response_id = response_id
268312
self.finish_reasons = finish_reasons
@@ -277,4 +321,8 @@ def __init__(
277321
self.seed = seed
278322
self.server_address = server_address
279323
self.server_port = server_port
280-
self._span_name = f"{self.operation_name} {request_model}" if request_model else self.operation_name
324+
self._span_name = (
325+
f"{self.operation_name} {request_model}"
326+
if request_model
327+
else self.operation_name
328+
)

util/opentelemetry-util-genai/src/opentelemetry/util/genai/tool_invocation.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,9 @@ def __init__(
6262
metric_attributes: dict[str, Any] | None = None,
6363
) -> None:
6464
"""Use handler.start_tool(name) or handler.tool(name) instead of calling this directly."""
65-
super().__init__(handler, attributes=attributes, metric_attributes=metric_attributes)
65+
super().__init__(
66+
handler, attributes=attributes, metric_attributes=metric_attributes
67+
)
6668
self.name = name
6769
self.arguments = arguments
6870
self.tool_call_id = tool_call_id

util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -269,9 +269,13 @@ def __init__(
269269
) -> None:
270270
self._handler = handler
271271
self._operation_name: str = ""
272-
self.attributes: dict[str, Any] = {} if attributes is None else attributes
272+
self.attributes: dict[str, Any] = (
273+
{} if attributes is None else attributes
274+
)
273275
"""Additional attributes to set on spans and/or events. Not set on metrics."""
274-
self.metric_attributes: dict[str, Any] = {} if metric_attributes is None else metric_attributes
276+
self.metric_attributes: dict[str, Any] = (
277+
{} if metric_attributes is None else metric_attributes
278+
)
275279
"""Additional attributes to set on metrics. Must be low cardinality. Not set on spans or events."""
276280
self.span: Span = _INVALID_SPAN
277281
self._span_name: str = ""
@@ -303,5 +307,5 @@ def fail(self, error: Error | BaseException) -> None:
303307

304308

305309
from opentelemetry.util.genai.inference_invocation import (
306-
LLMInvocation as LLMInvocation, # noqa: F401
310+
LLMInvocation, # noqa: F401
307311
)

util/opentelemetry-util-genai/src/opentelemetry/util/genai/workflow_invocation.py

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
gen_ai_attributes as GenAI,
2222
)
2323
from opentelemetry.trace import SpanKind
24-
2524
from opentelemetry.util.genai.types import (
2625
ContentCapturingMode,
2726
Error,
@@ -64,11 +63,19 @@ def __init__(
6463
metric_attributes: dict[str, Any] | None = None,
6564
) -> None:
6665
"""Use handler.start_workflow(name) or handler.workflow(name) instead of calling this directly."""
67-
super().__init__(handler, attributes=attributes, metric_attributes=metric_attributes)
66+
super().__init__(
67+
handler, attributes=attributes, metric_attributes=metric_attributes
68+
)
6869
self.name = name
69-
self.input_messages: list[InputMessage] = [] if input_messages is None else input_messages
70-
self.output_messages: list[OutputMessage] = [] if output_messages is None else output_messages
71-
self._span_name = f"invoke_workflow {name}" if name else "invoke_workflow"
70+
self.input_messages: list[InputMessage] = (
71+
[] if input_messages is None else input_messages
72+
)
73+
self.output_messages: list[OutputMessage] = (
74+
[] if output_messages is None else output_messages
75+
)
76+
self._span_name = (
77+
f"invoke_workflow {name}" if name else "invoke_workflow"
78+
)
7279
self._span_kind = SpanKind.INTERNAL
7380
handler._start(self)
7481

@@ -92,10 +99,14 @@ def _get_messages_for_span(self) -> dict[str, Any]:
9299
else None,
93100
),
94101
)
95-
return {key: value for key, value in optional_attrs if value is not None}
102+
return {
103+
key: value for key, value in optional_attrs if value is not None
104+
}
96105

97106
def _apply_finish(self, error: Error | None = None) -> None:
98-
attributes: dict[str, Any] = {GenAI.GEN_AI_OPERATION_NAME: self.operation_name}
107+
attributes: dict[str, Any] = {
108+
GenAI.GEN_AI_OPERATION_NAME: self.operation_name
109+
}
99110
attributes.update(self._get_messages_for_span())
100111
if error is not None:
101112
self._apply_error_attributes(error)

util/opentelemetry-util-genai/tests/test_toolcall.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,9 @@ def test_toolcall_has_attributes_dict():
6060

6161
def test_toolcallrequest_in_message_part_union():
6262
"""ToolCallRequest (not ToolInvocation) is the correct type for message parts"""
63-
tc = ToolCallRequest(name="get_weather", arguments={"city": "Paris"}, id="call_123")
63+
tc = ToolCallRequest(
64+
name="get_weather", arguments={"city": "Paris"}, id="call_123"
65+
)
6466
msg = InputMessage(role="assistant", parts=[tc])
6567
assert len(msg.parts) == 1
6668
assert isinstance(msg.parts[0], ToolCallRequest)

0 commit comments

Comments
 (0)