Skip to content

Commit 5a7197e

Browse files
gen-ai instrumentation(feat): OpenAI responses create instrumentation (#4474)
1 parent 0ee6bbd commit 5a7197e

34 files changed

Lines changed: 4861 additions & 653 deletions

File tree

.github/workflows/test.yml

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -214,25 +214,6 @@ jobs:
214214
- name: Run tests
215215
run: tox -e py314-test-instrumentation-openai-v2-latest -- -ra
216216

217-
py313-test-instrumentation-openai-v2-pydantic1_ubuntu-latest:
218-
name: instrumentation-openai-v2-pydantic1 3.13 Ubuntu
219-
runs-on: ubuntu-latest
220-
timeout-minutes: 30
221-
steps:
222-
- name: Checkout repo @ SHA - ${{ github.sha }}
223-
uses: actions/checkout@v4
224-
225-
- name: Set up Python 3.13
226-
uses: actions/setup-python@v5
227-
with:
228-
python-version: "3.13"
229-
230-
- name: Install tox
231-
run: pip install tox-uv
232-
233-
- name: Run tests
234-
run: tox -e py313-test-instrumentation-openai-v2-pydantic1 -- -ra
235-
236217
pypy3-test-instrumentation-openai-v2-oldest_ubuntu-latest:
237218
name: instrumentation-openai-v2-oldest pypy-3.10 Ubuntu
238219
runs-on: ubuntu-latest

instrumentation-genai/opentelemetry-instrumentation-openai-v2/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
4141
- Add strongly typed Responses API extractors with validation and content
4242
extraction improvements
4343
([#4337](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4337))
44+
- Add instrumentation for OpenAI Responses API `create`
45+
([#4474](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4474))
4446
- Add completion hook support.
4547
([#4315](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4315))
4648
- Fix `response_format` handling: map `json_object`/`json_schema` to `json` output type.

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@
6464
---
6565
"""
6666

67+
from importlib import import_module
6768
from typing import Collection
6869

6970
from wrapt import wrap_function_wrapper
@@ -91,6 +92,9 @@
9192
chat_completions_create_v_old,
9293
embeddings_create,
9394
)
95+
from .patch_responses import (
96+
responses_create,
97+
)
9498

9599

96100
class OpenAIInstrumentor(BaseInstrumentor):
@@ -177,10 +181,33 @@ def _instrument(self, **kwargs):
177181
),
178182
)
179183

184+
responses_module = _get_responses_module()
185+
# Responses instrumentation is intentionally limited to the latest
186+
# experimental semconv path. Unlike chat completions, we do not carry
187+
# a second legacy wrapper here; the current implementation is built on
188+
# the inference handler lifecycle and would need a separate old-path
189+
# implementation to support legacy semconv mode.
190+
if responses_module is not None and latest_experimental_enabled:
191+
wrap_function_wrapper(
192+
"openai.resources.responses.responses",
193+
"Responses.create",
194+
responses_create(handler),
195+
)
196+
180197
def _uninstrument(self, **kwargs):
    """Remove every wrapper installed by ``_instrument``."""
    import openai  # pylint: disable=import-outside-toplevel # noqa: PLC0415

    # Chat-completion and embedding wrappers are always installed, so they
    # are always removed.
    chat = openai.resources.chat.completions
    for wrapped_class in (chat.Completions, chat.AsyncCompletions):
        unwrap(wrapped_class, "create")

    embeddings = openai.resources.embeddings
    for wrapped_class in (embeddings.Embeddings, embeddings.AsyncEmbeddings):
        unwrap(wrapped_class, "create")

    # Older openai releases do not ship the responses resource at all;
    # only unwrap it when the module can actually be imported.
    responses_module = _get_responses_module()
    if responses_module is not None:
        unwrap(responses_module.Responses, "create")
208+
209+
def _get_responses_module():
210+
try:
211+
return import_module("openai.resources.responses.responses")
212+
except ImportError:
213+
return None
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
# Copyright The OpenTelemetry Authors
2+
# SPDX-License-Identifier: Apache-2.0
3+
4+
from __future__ import annotations
5+
6+
from opentelemetry.util.genai.handler import TelemetryHandler
7+
from opentelemetry.util.genai.types import Error
8+
9+
from .response_extractors import (
10+
apply_request_attributes,
11+
extract_params,
12+
get_inference_creation_kwargs,
13+
set_invocation_response_attributes,
14+
)
15+
from .response_wrappers import ResponseStreamWrapper
16+
from .utils import is_streaming
17+
18+
19+
def responses_create(handler: TelemetryHandler):
    """Return a wrapt-compatible wrapper that traces ``Responses.create``.

    The wrapper starts an inference invocation before delegating to the
    wrapped method, records request/response attributes on it, and either
    stops it (non-streaming) or hands it to a stream wrapper that finishes
    it when the stream is consumed.
    """

    capture_content = handler.should_capture_content()

    def traced_method(wrapped, instance, args, kwargs):
        request_params = extract_params(**kwargs)
        creation_kwargs = get_inference_creation_kwargs(
            request_params, instance
        )
        invocation = handler.start_inference(**creation_kwargs)
        apply_request_attributes(invocation, request_params, capture_content)

        try:
            raw_result = wrapped(*args, **kwargs)
            response = _get_response_stream_result(raw_result)

            if is_streaming(kwargs):
                # Streaming: the invocation is finished by the wrapper once
                # the caller drains the stream.
                return ResponseStreamWrapper(
                    response,
                    invocation,
                    capture_content,
                )

            set_invocation_response_attributes(
                invocation, response, capture_content
            )
            invocation.stop()
            # Return the original (possibly raw) result so callers see
            # exactly what the unwrapped method would have produced.
            return raw_result
        except Exception as error:
            invocation.fail(Error(type=type(error), message=str(error)))
            raise

    return traced_method
52+
53+
54+
def _get_response_stream_result(result):
55+
if hasattr(result, "parse"):
56+
return result.parse()
57+
return result

0 commit comments

Comments
 (0)