Skip to content

Commit ebbdc72

Browse files
committed
align parse() wrapping with positional args after #4445 rebase
1 parent fb4a084 commit ebbdc72

4 files changed

Lines changed: 62 additions & 26 deletions

File tree

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -211,10 +211,10 @@ def _instrument(self, **kwargs):
211211
self._parse_supported = _is_parse_supported()
212212
if self._parse_supported:
213213
wrap_function_wrapper(
214-
module="openai.resources.chat.completions",
215-
name="Completions.parse",
216-
wrapper=(
217-
chat_completions_create_v_new(handler, content_mode)
214+
"openai.resources.chat.completions",
215+
"Completions.parse",
216+
(
217+
chat_completions_create_v_new(handler)
218218
if latest_experimental_enabled
219219
else chat_completions_create_v_old(
220220
tracer, logger, instruments, is_content_enabled()
@@ -223,10 +223,10 @@ def _instrument(self, **kwargs):
223223
)
224224

225225
wrap_function_wrapper(
226-
module="openai.resources.chat.completions",
227-
name="AsyncCompletions.parse",
228-
wrapper=(
229-
async_chat_completions_create_v_new(handler, content_mode)
226+
"openai.resources.chat.completions",
227+
"AsyncCompletions.parse",
228+
(
229+
async_chat_completions_create_v_new(handler)
230230
if latest_experimental_enabled
231231
else async_chat_completions_create_v_old(
232232
tracer, logger, instruments, is_content_enabled()

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 30 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,9 @@ def choice_to_event(choice, capture_content):
164164
if choice.message:
165165
message = {
166166
"role": (
167-
choice.message.role if choice.message and choice.message.role else None
167+
choice.message.role
168+
if choice.message and choice.message.role
169+
else None
168170
)
169171
}
170172
tool_calls = extract_tool_calls(choice.message, capture_content)
@@ -249,10 +251,14 @@ def get_llm_request_attributes(
249251
if operation_name == GenAIAttributes.GenAiOperationNameValues.CHAT.value:
250252
attributes.update(
251253
{
252-
GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE: kwargs.get("temperature"),
254+
GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE: kwargs.get(
255+
"temperature"
256+
),
253257
GenAIAttributes.GEN_AI_REQUEST_TOP_P: kwargs.get("p")
254258
or kwargs.get("top_p"),
255-
GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
259+
GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS: kwargs.get(
260+
"max_tokens"
261+
),
256262
GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY: kwargs.get(
257263
"presence_penalty"
258264
),
@@ -266,12 +272,16 @@ def get_llm_request_attributes(
266272
if (choice_count := kwargs.get("n")) is not None:
267273
# Only add non default, meaningful values
268274
if isinstance(choice_count, int) and choice_count != 1:
269-
attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = choice_count
275+
attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = (
276+
choice_count
277+
)
270278

271279
if (stop_sequences := kwargs.get("stop")) is not None:
272280
if isinstance(stop_sequences, str):
273281
stop_sequences = [stop_sequences]
274-
attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = stop_sequences
282+
attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = (
283+
stop_sequences
284+
)
275285

276286
request_response_format_attr_key = (
277287
GenAIAttributes.GEN_AI_OUTPUT_TYPE
@@ -317,7 +327,10 @@ def get_llm_request_attributes(
317327
)
318328

319329
# Add embeddings-specific attributes
320-
elif operation_name == GenAIAttributes.GenAiOperationNameValues.EMBEDDINGS.value:
330+
elif (
331+
operation_name
332+
== GenAIAttributes.GenAiOperationNameValues.EMBEDDINGS.value
333+
):
321334
# Add embedding dimensions if specified
322335
if (dimensions := kwargs.get("dimensions")) is not None:
323336
# TODO: move to GEN_AI_EMBEDDINGS_DIMENSION_COUNT when 1.39.0 is baseline
@@ -372,7 +385,9 @@ def create_chat_invocation(
372385
GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT
373386
] = choice_count
374387

375-
if (response_format := get_value(kwargs.get("response_format"))) is not None:
388+
if (
389+
response_format := get_value(kwargs.get("response_format"))
390+
) is not None:
376391
# response_format may be string, object with a string in the `type` key,
377392
# or a type (e.g. Pydantic model class used with parse())
378393
if isinstance(response_format, type):
@@ -420,13 +435,16 @@ def get_value(v: Any):
420435
def handle_span_exception(span, error: BaseException):
421436
span.set_status(Status(StatusCode.ERROR, str(error)))
422437
if span.is_recording():
423-
span.set_attribute(ErrorAttributes.ERROR_TYPE, type(error).__qualname__)
438+
span.set_attribute(
439+
ErrorAttributes.ERROR_TYPE, type(error).__qualname__
440+
)
424441
span.end()
425442

426443

427444
def _is_text_part(content: Any) -> bool:
428445
return isinstance(content, str) or (
429-
isinstance(content, Iterable) and all(isinstance(part, str) for part in content)
446+
isinstance(content, Iterable)
447+
and all(isinstance(part, str) for part in content)
430448
)
431449

432450

@@ -477,7 +495,9 @@ def extract_tool_calls_new(tool_calls) -> list[ToolCallRequest]:
477495
arguments = arguments_str
478496

479497
# TODO: support custom
480-
parts.append(ToolCallRequest(id=call_id, name=func_name, arguments=arguments))
498+
parts.append(
499+
ToolCallRequest(id=call_id, name=func_name, arguments=arguments)
500+
)
481501
return parts
482502

483503

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_structured_outputs.py

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -82,14 +82,18 @@ async def test_async_structured_output_with_content(
8282
)
8383
assert_messages_attribute(
8484
spans[0].attributes["gen_ai.output.messages"],
85-
format_simple_expected_output_message(response.choices[0].message.content),
85+
format_simple_expected_output_message(
86+
response.choices[0].message.content
87+
),
8688
)
8789
else:
8890
logs = log_exporter.get_finished_logs()
8991
assert len(logs) == 2
9092

9193
user_message = {"content": STRUCTURED_OUTPUT_PROMPT[0]["content"]}
92-
assert_message_in_logs(logs[0], "gen_ai.user.message", user_message, spans[0])
94+
assert_message_in_logs(
95+
logs[0], "gen_ai.user.message", user_message, spans[0]
96+
)
9397

9498
choice_event = {
9599
"index": 0,
@@ -99,7 +103,9 @@ async def test_async_structured_output_with_content(
99103
"content": response.choices[0].message.content,
100104
},
101105
}
102-
assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
106+
assert_message_in_logs(
107+
logs[1], "gen_ai.choice", choice_event, spans[0]
108+
)
103109

104110

105111
@pytest.mark.asyncio()
@@ -157,4 +163,6 @@ async def test_async_structured_output_no_content(
157163
"finish_reason": "stop",
158164
"message": {"role": "assistant"},
159165
}
160-
assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
166+
assert_message_in_logs(
167+
logs[1], "gen_ai.choice", choice_event, spans[0]
168+
)

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_structured_outputs.py

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -75,14 +75,18 @@ def test_structured_output_with_content(
7575
)
7676
assert_messages_attribute(
7777
spans[0].attributes["gen_ai.output.messages"],
78-
format_simple_expected_output_message(response.choices[0].message.content),
78+
format_simple_expected_output_message(
79+
response.choices[0].message.content
80+
),
7981
)
8082
else:
8183
logs = log_exporter.get_finished_logs()
8284
assert len(logs) == 2
8385

8486
user_message = {"content": STRUCTURED_OUTPUT_PROMPT[0]["content"]}
85-
assert_message_in_logs(logs[0], "gen_ai.user.message", user_message, spans[0])
87+
assert_message_in_logs(
88+
logs[0], "gen_ai.user.message", user_message, spans[0]
89+
)
8690

8791
choice_event = {
8892
"index": 0,
@@ -92,7 +96,9 @@ def test_structured_output_with_content(
9296
"content": response.choices[0].message.content,
9397
},
9498
}
95-
assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
99+
assert_message_in_logs(
100+
logs[1], "gen_ai.choice", choice_event, spans[0]
101+
)
96102

97103

98104
def test_structured_output_no_content(
@@ -145,4 +151,6 @@ def test_structured_output_no_content(
145151
"finish_reason": "stop",
146152
"message": {"role": "assistant"},
147153
}
148-
assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
154+
assert_message_in_logs(
155+
logs[1], "gen_ai.choice", choice_event, spans[0]
156+
)

0 commit comments

Comments (0)