Skip to content

Commit 392ae45

Browse files
committed
fix failing checks and clean up imports
1 parent 941bc17 commit 392ae45

File tree

17 files changed

+137
-158
lines changed

17 files changed

+137
-158
lines changed

instrumentation-genai/AGENTS.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,4 +39,3 @@ except Exception as exc:
3939
tests and callers, not in the instrumentation layer.
4040
- When catching exceptions from the underlying library to record telemetry, always re-raise
4141
the original exception unmodified.
42-
- Do not wrap, replace, or suppress exceptions — telemetry must be transparent to callers.

instrumentation-genai/opentelemetry-instrumentation-anthropic/src/opentelemetry/instrumentation/anthropic/patch.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,10 +24,10 @@
2424
gen_ai_attributes as GenAIAttributes,
2525
)
2626
from opentelemetry.util.genai.handler import TelemetryHandler
27-
from opentelemetry.util.genai.inference_invocation import (
27+
from opentelemetry.util.genai.types import (
28+
Error,
2829
LLMInvocation, # pyright: ignore[reportDeprecated] # TODO: migrate to InferenceInvocation
2930
)
30-
from opentelemetry.util.genai.types import Error
3131
from opentelemetry.util.genai.utils import (
3232
should_capture_content_on_spans_in_experimental_mode,
3333
)

instrumentation-genai/opentelemetry-instrumentation-anthropic/src/opentelemetry/instrumentation/anthropic/wrappers.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,9 @@
1919
from typing import TYPE_CHECKING, Callable, Iterator, Optional
2020

2121
from opentelemetry.util.genai.handler import TelemetryHandler
22-
from opentelemetry.util.genai.inference_invocation import (
23-
LLMInvocation, # pyright: ignore[reportDeprecated] # TODO: migrate to InferenceInvocation
24-
)
2522
from opentelemetry.util.genai.types import (
2623
Error,
24+
LLMInvocation, # pyright: ignore[reportDeprecated] # TODO: migrate to InferenceInvocation
2725
MessagePart,
2826
OutputMessage,
2927
)

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,10 @@
2525
_InvocationManager,
2626
)
2727
from opentelemetry.util.genai.handler import TelemetryHandler
28-
from opentelemetry.util.genai.inference_invocation import (
29-
LLMInvocation, # pyright: ignore[reportDeprecated] # TODO: migrate to InferenceInvocation
30-
)
3128
from opentelemetry.util.genai.types import (
3229
Error,
3330
InputMessage,
31+
LLMInvocation, # pyright: ignore[reportDeprecated] # TODO: migrate to InferenceInvocation
3432
MessagePart,
3533
OutputMessage,
3634
Text,
@@ -160,7 +158,7 @@ def on_chat_model_start(
160158
self._invocation_manager.add_invocation_state(
161159
run_id=run_id,
162160
parent_run_id=parent_run_id,
163-
invocation=llm_invocation,
161+
invocation=llm_invocation, # pyright: ignore[reportArgumentType]
164162
)
165163

166164
def on_llm_end(

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,12 +33,10 @@
3333
from opentelemetry.trace import Span, SpanKind, Tracer
3434
from opentelemetry.trace.propagation import set_span_in_context
3535
from opentelemetry.util.genai.handler import TelemetryHandler
36-
from opentelemetry.util.genai.inference_invocation import (
37-
LLMInvocation, # pylint: disable=no-name-in-module # TODO: migrate to InferenceInvocation
38-
)
3936
from opentelemetry.util.genai.types import (
4037
ContentCapturingMode,
4138
Error,
39+
LLMInvocation, # pylint: disable=no-name-in-module # TODO: migrate to InferenceInvocation
4240
OutputMessage,
4341
Text,
4442
ToolCallRequest,

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/response_wrappers.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,10 @@
88
from typing import TYPE_CHECKING, Callable, Generator, Generic, TypeVar
99

1010
from opentelemetry.util.genai.handler import TelemetryHandler
11-
from opentelemetry.util.genai.inference_invocation import (
11+
from opentelemetry.util.genai.types import (
12+
Error,
1213
LLMInvocation, # pylint: disable=no-name-in-module # TODO: migrate to InferenceInvocation
1314
)
14-
from opentelemetry.util.genai.types import Error
1515

1616
# OpenAI Responses internals are version-gated (added in openai>=1.66.0), so
1717
# pylint may not resolve them in all lint environments even though we guard

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -36,11 +36,9 @@
3636
error_attributes as ErrorAttributes,
3737
)
3838
from opentelemetry.trace.status import Status, StatusCode
39-
from opentelemetry.util.genai.inference_invocation import (
40-
LLMInvocation, # pylint: disable=no-name-in-module # TODO: migrate to InferenceInvocation
41-
)
4239
from opentelemetry.util.genai.types import (
4340
InputMessage,
41+
LLMInvocation, # pylint: disable=no-name-in-module # TODO: migrate to InferenceInvocation
4442
OutputMessage,
4543
Text,
4644
ToolCallRequest,

util/opentelemetry-util-genai/AGENTS.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,6 @@ propagation, so all telemetry calls become no-ops. Always use `handler.start_*()
5454
tests and callers, not telemetry internals.
5555
- When catching exceptions from the underlying library to record telemetry, always re-raise
5656
the original exception unmodified.
57-
- Do not wrap, replace, or suppress exceptions — telemetry must be transparent to callers.
5857

5958
## 4. Documentation
6059

util/opentelemetry-util-genai/src/opentelemetry/util/genai/embedding_invocation.py renamed to util/opentelemetry-util-genai/src/opentelemetry/util/genai/_embedding_invocation.py

File renamed without changes.

util/opentelemetry-util-genai/src/opentelemetry/util/genai/inference_invocation.py renamed to util/opentelemetry-util-genai/src/opentelemetry/util/genai/_inference_invocation.py

Lines changed: 97 additions & 117 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
from __future__ import annotations
1616

17-
from dataclasses import asdict
17+
from dataclasses import asdict, dataclass, field
1818
from typing import Any
1919

2020
from typing_extensions import deprecated
@@ -24,12 +24,7 @@
2424
gen_ai_attributes as GenAI,
2525
)
2626
from opentelemetry.semconv.attributes import server_attributes
27-
from opentelemetry.trace import (
28-
INVALID_SPAN,
29-
SpanKind,
30-
Tracer,
31-
set_span_in_context,
32-
)
27+
from opentelemetry.trace import INVALID_SPAN, Span, SpanKind, Tracer
3328
from opentelemetry.util.genai._invocation import Error, GenAIInvocation
3429
from opentelemetry.util.genai.metrics import InvocationMetricsRecorder
3530
from opentelemetry.util.genai.types import (
@@ -260,122 +255,107 @@ def _emit_event(self) -> None:
260255

261256

262257
@deprecated("LLMInvocation is deprecated. Use InferenceInvocation instead.")
263-
class LLMInvocation(InferenceInvocation):
264-
"""Deprecated. Use InferenceInvocation instead."""
258+
@dataclass
259+
class LLMInvocation:
260+
"""Deprecated. Use InferenceInvocation instead.
265261
266-
def __init__( # pylint: disable=too-many-locals
267-
self,
268-
tracer: Tracer | None = None,
269-
metrics_recorder: InvocationMetricsRecorder | None = None,
270-
logger: Logger | None = None,
271-
provider: str = "",
272-
*,
273-
request_model: str | None = None,
274-
input_messages: list[InputMessage] | None = None,
275-
output_messages: list[OutputMessage] | None = None,
276-
system_instruction: list[MessagePart] | None = None,
277-
response_model_name: str | None = None,
278-
response_id: str | None = None,
279-
finish_reasons: list[str] | None = None,
280-
input_tokens: int | None = None,
281-
output_tokens: int | None = None,
282-
temperature: float | None = None,
283-
top_p: float | None = None,
284-
frequency_penalty: float | None = None,
285-
presence_penalty: float | None = None,
286-
max_tokens: int | None = None,
287-
stop_sequences: list[str] | None = None,
288-
seed: int | None = None,
289-
server_address: str | None = None,
290-
server_port: int | None = None,
291-
attributes: dict[str, Any] | None = None,
292-
metric_attributes: dict[str, Any] | None = None,
293-
) -> None:
294-
if tracer is not None:
295-
super().__init__(
296-
tracer,
297-
metrics_recorder,
298-
logger,
299-
provider,
300-
request_model=request_model,
301-
input_messages=input_messages,
302-
output_messages=output_messages,
303-
system_instruction=system_instruction,
304-
response_model_name=response_model_name,
305-
response_id=response_id,
306-
finish_reasons=finish_reasons,
307-
input_tokens=input_tokens,
308-
output_tokens=output_tokens,
309-
temperature=temperature,
310-
top_p=top_p,
311-
frequency_penalty=frequency_penalty,
312-
presence_penalty=presence_penalty,
313-
max_tokens=max_tokens,
314-
stop_sequences=stop_sequences,
315-
seed=seed,
316-
server_address=server_address,
317-
server_port=server_port,
318-
attributes=attributes,
319-
metric_attributes=metric_attributes,
320-
)
321-
return
322-
# Old-style: data container, started later via handler.start_llm()
323-
# _tracer/_metrics_recorder/_logger are set by _start_with_handler() in that case
324-
self._operation_name = GenAI.GenAiOperationNameValues.CHAT.value
325-
self._tracer = None
326-
self._metrics_recorder = None
327-
self._logger = None
328-
self.attributes = {} if attributes is None else attributes
329-
self.metric_attributes = (
330-
{} if metric_attributes is None else metric_attributes
331-
)
332-
self.span = INVALID_SPAN
333-
self._span_context = set_span_in_context(INVALID_SPAN)
334-
self._span_kind = SpanKind.CLIENT
335-
self._context_token = None
336-
self._monotonic_start_s = None
337-
self.provider = provider
338-
self.request_model = request_model
339-
self.input_messages = [] if input_messages is None else input_messages
340-
self.output_messages = (
341-
[] if output_messages is None else output_messages
342-
)
343-
self.system_instruction = (
344-
[] if system_instruction is None else system_instruction
345-
)
346-
self.response_model_name = response_model_name
347-
self.response_id = response_id
348-
self.finish_reasons = finish_reasons
349-
self.input_tokens = input_tokens
350-
self.output_tokens = output_tokens
351-
self.temperature = temperature
352-
self.top_p = top_p
353-
self.frequency_penalty = frequency_penalty
354-
self.presence_penalty = presence_penalty
355-
self.max_tokens = max_tokens
356-
self.stop_sequences = stop_sequences
357-
self.seed = seed
358-
self.server_address = server_address
359-
self.server_port = server_port
360-
self._span_name = (
361-
f"{self._operation_name} {request_model}"
362-
if request_model
363-
else self._operation_name
364-
)
262+
Data container for an LLM invocation. Pass to handler.start_llm() to start
263+
the span, then update fields and call handler.stop_llm() or handler.fail_llm().
264+
"""
365265

366-
@property
367-
def invocation(self) -> LLMInvocation | None: # pyright: ignore[reportDeprecated]
368-
"""Returns self once started, None before handler.start_llm() is called."""
369-
return self if self._context_token is not None else None
266+
request_model: str | None = None
267+
input_messages: list[InputMessage] = field(default_factory=list)
268+
output_messages: list[OutputMessage] = field(default_factory=list)
269+
system_instruction: list[MessagePart] = field(default_factory=list)
270+
provider: str | None = None
271+
response_model_name: str | None = None
272+
response_id: str | None = None
273+
finish_reasons: list[str] | None = None
274+
input_tokens: int | None = None
275+
output_tokens: int | None = None
276+
attributes: dict[str, Any] = field(default_factory=dict)
277+
"""Additional attributes to set on spans and/or events. Not set on metrics."""
278+
metric_attributes: dict[str, Any] = field(default_factory=dict)
279+
"""Additional attributes to set on metrics. Must be low cardinality. Not set on spans or events."""
280+
temperature: float | None = None
281+
top_p: float | None = None
282+
frequency_penalty: float | None = None
283+
presence_penalty: float | None = None
284+
max_tokens: int | None = None
285+
stop_sequences: list[str] | None = None
286+
seed: int | None = None
287+
server_address: str | None = None
288+
server_port: int | None = None
289+
290+
_inference_invocation: InferenceInvocation | None = field(
291+
default=None, init=False, repr=False
292+
)
370293

371294
def _start_with_handler(
372295
self,
373296
tracer: Tracer,
374297
metrics_recorder: InvocationMetricsRecorder,
375298
logger: Logger,
376299
) -> None:
377-
"""Attach telemetry components and start the span. Called by handler.start_llm()."""
378-
self._tracer = tracer
379-
self._metrics_recorder = metrics_recorder
380-
self._logger = logger
381-
self._start()
300+
"""Create and start an InferenceInvocation from this data container. Called by handler.start_llm()."""
301+
self._inference_invocation = InferenceInvocation(
302+
tracer,
303+
metrics_recorder,
304+
logger,
305+
self.provider or "",
306+
request_model=self.request_model,
307+
input_messages=self.input_messages,
308+
output_messages=self.output_messages,
309+
system_instruction=self.system_instruction,
310+
response_model_name=self.response_model_name,
311+
response_id=self.response_id,
312+
finish_reasons=self.finish_reasons,
313+
input_tokens=self.input_tokens,
314+
output_tokens=self.output_tokens,
315+
temperature=self.temperature,
316+
top_p=self.top_p,
317+
frequency_penalty=self.frequency_penalty,
318+
presence_penalty=self.presence_penalty,
319+
max_tokens=self.max_tokens,
320+
stop_sequences=self.stop_sequences,
321+
seed=self.seed,
322+
server_address=self.server_address,
323+
server_port=self.server_port,
324+
attributes=self.attributes,
325+
metric_attributes=self.metric_attributes,
326+
)
327+
328+
def _sync_to_invocation(self) -> None:
329+
inv = self._inference_invocation
330+
if inv is None:
331+
return
332+
inv.provider = self.provider or ""
333+
inv.request_model = self.request_model
334+
inv.input_messages = self.input_messages
335+
inv.output_messages = self.output_messages
336+
inv.system_instruction = self.system_instruction
337+
inv.response_model_name = self.response_model_name
338+
inv.response_id = self.response_id
339+
inv.finish_reasons = self.finish_reasons
340+
inv.input_tokens = self.input_tokens
341+
inv.output_tokens = self.output_tokens
342+
inv.temperature = self.temperature
343+
inv.top_p = self.top_p
344+
inv.frequency_penalty = self.frequency_penalty
345+
inv.presence_penalty = self.presence_penalty
346+
inv.max_tokens = self.max_tokens
347+
inv.stop_sequences = self.stop_sequences
348+
inv.seed = self.seed
349+
inv.server_address = self.server_address
350+
inv.server_port = self.server_port
351+
inv.attributes = self.attributes
352+
inv.metric_attributes = self.metric_attributes
353+
354+
@property
355+
def span(self) -> Span:
356+
"""The underlying span, for back-compat with code that checks span.is_recording()."""
357+
return (
358+
self._inference_invocation.span
359+
if self._inference_invocation is not None
360+
else INVALID_SPAN
361+
)

0 commit comments

Comments
 (0)