Skip to content
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Added

- `opentelemetry-instrumentation-openai-v2`: Add instrumentation for `chat.completions.parse()` structured outputs
([#4416](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4416))
- Bump `pylint` to `4.0.5`
([#4244](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4244))
- `opentelemetry-instrumentation-sqlite3`: Add uninstrument, error status, suppress, and no-op tests
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,9 +104,25 @@
)


def _is_parse_supported():
"""Check if the parse() method is available on the Completions class.

The parse() method for structured outputs was added in openai >= 1.40.0.
"""
try:
from openai.resources.chat.completions import ( # pylint: disable=import-outside-toplevel # noqa: PLC0415
Completions,
)

return hasattr(Completions, "parse")
except ImportError:
return False


class OpenAIInstrumentor(BaseInstrumentor):
def __init__(self):
    """Initialize per-instance state consumed later by ``_instrument``."""
    # True only after _instrument() confirms the SDK exposes parse();
    # _uninstrument() uses it to decide whether parse() needs unwrapping.
    self._parse_supported = False
    self._meter = None

def instrumentation_dependencies(self) -> Collection[str]:
    """Return the package specifiers this instrumentation is compatible with."""
    return _instruments
Expand Down Expand Up @@ -188,10 +204,44 @@ def _instrument(self, **kwargs):
),
)

# parse() wraps create() internally in the OpenAI SDK and returns a
# ParsedChatCompletion. The telemetry-relevant fields (model, usage,
# choices, finish_reason) are identical to ChatCompletion, so the
# existing create() wrappers handle it correctly.
self._parse_supported = _is_parse_supported()
if self._parse_supported:
wrap_function_wrapper(
"openai.resources.chat.completions",
"Completions.parse",
(
chat_completions_create_v_new(handler)
if latest_experimental_enabled
else chat_completions_create_v_old(
tracer, logger, instruments, is_content_enabled()
)
),
)

wrap_function_wrapper(
"openai.resources.chat.completions",
"AsyncCompletions.parse",
(
async_chat_completions_create_v_new(handler)
if latest_experimental_enabled
else async_chat_completions_create_v_old(
tracer, logger, instruments, is_content_enabled()
)
),
)

def _uninstrument(self, **kwargs):
    """Remove every wrapper installed by ``_instrument``."""
    import openai  # pylint: disable=import-outside-toplevel # noqa: PLC0415

    chat = openai.resources.chat.completions
    embeddings = openai.resources.embeddings

    # Same unwrap order as the original: sync/async chat create, then
    # sync/async embeddings create.
    for owner in (chat.Completions, chat.AsyncCompletions):
        unwrap(owner, "create")
    for owner in (embeddings.Embeddings, embeddings.AsyncEmbeddings):
        unwrap(owner, "create")

    # parse() was wrapped only when the installed SDK exposed it.
    if self._parse_supported:
        for owner in (chat.Completions, chat.AsyncCompletions):
            unwrap(owner, "parse")
Original file line number Diff line number Diff line change
Expand Up @@ -289,8 +289,18 @@ def get_llm_request_attributes(
else GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
)
if (response_format := kwargs.get("response_format")) is not None:
# response_format may be string or object with a string in the `type` key
if isinstance(response_format, Mapping):
# response_format may be string, object with a string in the `type` key,
# or a type (e.g. Pydantic model class used with parse())
if isinstance(response_format, type):
if latest_experimental_enabled:
attributes[request_response_format_attr_key] = (
GenAIAttributes.GenAiOutputTypeValues.JSON.value
)
else:
attributes[request_response_format_attr_key] = (
GenAIAttributes.GenAiOpenaiRequestResponseFormatValues.JSON_SCHEMA.value
)
elif isinstance(response_format, Mapping):
if (
response_format_type := response_format.get("type")
) is not None:
Expand Down Expand Up @@ -378,8 +388,13 @@ def create_chat_invocation(
if (
response_format := get_value(kwargs.get("response_format"))
) is not None:
# response_format may be string or object with a string in the `type` key
if isinstance(response_format, Mapping):
# response_format may be string, object with a string in the `type` key,
# or a type (e.g. Pydantic model class used with parse())
if isinstance(response_format, type):
invocation.attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = (
GenAIAttributes.GenAiOutputTypeValues.JSON.value
)
elif isinstance(response_format, Mapping):
if (
response_format_type := get_value(response_format.get("type"))
) is not None:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
interactions:
- request:
body: |-
{
"messages": [
{
"role": "user",
"content": "Extract the event information from: Team Meeting on 2024-01-15 with Alice and Bob"
}
],
"model": "gpt-4o-mini",
"response_format": {
"type": "json_schema",
"json_schema": {
"name": "CalendarEvent",
"strict": true,
"schema": {
"type": "object",
"properties": {
"name": {"type": "string"},
"date": {"type": "string"},
"participants": {"items": {"type": "string"}, "type": "array"}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}
}
}
}
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
authorization:
- Bearer test_openai_api_key
connection:
- keep-alive
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.54.3
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: |-
{
"id": "chatcmpl-structured-test-004",
"object": "chat.completion",
"created": 1731368630,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "{\"name\": \"Team Meeting\", \"date\": \"2024-01-15\", \"participants\": [\"Alice\", \"Bob\"]}",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 50,
"completion_tokens": 30,
"total_tokens": 80,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"system_fingerprint": "fp_0ba0d124f1"
}
headers:
CF-Cache-Status:
- DYNAMIC
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Mon, 11 Nov 2024 23:43:50 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
content-length:
- '800'
openai-organization: test_openai_org_id
openai-processing-ms:
- '350'
openai-version:
- '2020-10-01'
x-request-id:
- req_structured_test_004
status:
code: 200
message: OK
version: 1
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
interactions:
- request:
body: |-
{
"messages": [
{
"role": "user",
"content": "Extract the event information from: Team Meeting on 2024-01-15 with Alice and Bob"
}
],
"model": "gpt-4o-mini",
"response_format": {
"type": "json_schema",
"json_schema": {
"name": "CalendarEvent",
"strict": true,
"schema": {
"type": "object",
"properties": {
"name": {"type": "string"},
"date": {"type": "string"},
"participants": {"items": {"type": "string"}, "type": "array"}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}
}
}
}
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
authorization:
- Bearer test_openai_api_key
connection:
- keep-alive
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.54.3
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: |-
{
"id": "chatcmpl-structured-test-003",
"object": "chat.completion",
"created": 1731368630,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "{\"name\": \"Team Meeting\", \"date\": \"2024-01-15\", \"participants\": [\"Alice\", \"Bob\"]}",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 50,
"completion_tokens": 30,
"total_tokens": 80,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"system_fingerprint": "fp_0ba0d124f1"
}
headers:
CF-Cache-Status:
- DYNAMIC
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Mon, 11 Nov 2024 23:43:50 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
content-length:
- '800'
openai-organization: test_openai_org_id
openai-processing-ms:
- '350'
openai-version:
- '2020-10-01'
x-request-id:
- req_structured_test_003
status:
code: 200
message: OK
version: 1
Loading