Skip to content

Commit d97bb9e

Browse files
committed
Clean up code a bunch.
1 parent d96aaee commit d97bb9e

4 files changed

Lines changed: 3223 additions & 3285 deletions

File tree

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 124 additions & 185 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,5 @@
11
# Copyright The OpenTelemetry Authors
2-
#
3-
# Licensed under the Apache License, Version 2.0 (the "License");
4-
# you may not use this file except in compliance with the License.
5-
# You may obtain a copy of the License at
6-
#
7-
# http://www.apache.org/licenses/LICENSE-2.0
8-
#
9-
# Unless required by applicable law or agreed to in writing, software
10-
# distributed under the License is distributed on an "AS IS" BASIS,
11-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12-
# See the License for the specific language governing permissions and
13-
# limitations under the License.
14-
# pylint: disable=too-many-lines
2+
# SPDX-License-Identifier: Apache-2.0
153

164
import copy
175
import dataclasses
@@ -56,6 +44,9 @@
5644
from opentelemetry.trace.span import Span
5745
from opentelemetry.util.genai.completion_hook import CompletionHook
5846
from opentelemetry.util.genai.handler import TelemetryHandler
47+
from opentelemetry.util.genai.invocation import (
48+
InferenceInvocation,
49+
)
5950
from opentelemetry.util.genai.types import (
6051
ContentCapturingMode,
6152
FunctionToolDefinition,
@@ -557,6 +548,25 @@ def __init__(
557548
)
558549
self._is_async = is_async
559550

551+
def apply_finish_attributes(
    self,
    invocation: InferenceInvocation,
    candidates: list[Candidate],
):
    """Copy the accumulated usage/finish telemetry onto *invocation*.

    Intended to be called from a ``finally`` block so these attributes are
    recorded for both successful and failed ``generate_content`` calls.

    Args:
        invocation: The in-flight inference invocation to annotate.
        candidates: All response candidates observed so far; may be empty
            for failed or interrupted (streaming) calls.
    """
    # NOTE: removed leftover debug print() statements — library
    # instrumentation must not write to stdout.
    invocation.input_tokens = self._input_tokens
    invocation.output_tokens = self._output_tokens
    # Sorted for a deterministic attribute value across runs.
    invocation.finish_reasons = sorted(self._finish_reasons_set)
    invocation.cache_read_input_tokens = self._cached_tokens
    invocation.attributes["gen_ai.usage.reasoning.output_tokens"] = (
        self._thinking_tokens
    )
    # Only capture message content when recording is enabled and there is
    # actually content to record.
    if self._content_recording_enabled and candidates:
        invocation.output_messages = to_output_messages(
            candidates=candidates
        )
569+
560570
def wrapped_config(
561571
self, config: Optional[GenerateContentConfigOrDict]
562572
) -> Optional[GenerateContentConfig]:
@@ -934,53 +944,38 @@ def instrumented_generate_content(
934944
)
935945
)
936946
if helper.experimental_sem_convs_enabled:
937-
invocation = telemetry_handler.start_inference(
947+
with telemetry_handler.inference(
938948
provider=helper._genai_system,
939949
request_model=model,
940950
operation_name="generate_content",
941-
)
942-
invocation.attributes.update(extra_attributes)
943-
invocation.tool_definitions = helper._maybe_get_tool_definitions(
944-
config
945-
)
946-
947-
if helper._content_recording_enabled:
948-
invocation.input_messages = to_input_messages(
949-
contents=transformers.t_contents(contents)
951+
) as invocation:
952+
invocation.attributes.update(extra_attributes)
953+
invocation.tool_definitions = (
954+
helper._maybe_get_tool_definitions(config)
950955
)
951-
if system_content := _config_to_system_instruction(config):
952-
invocation.system_instruction = to_system_instructions(
953-
content=transformers.t_contents(system_content)[0]
954-
)
955956

956-
try:
957-
response = wrapped_func(
958-
self,
959-
model=model,
960-
contents=contents,
961-
config=helper.wrapped_config(config),
962-
**kwargs,
963-
)
964-
helper._update_response(response)
965-
966-
invocation.input_tokens = helper._input_tokens
967-
invocation.output_tokens = helper._output_tokens
968-
invocation.finish_reasons = sorted(helper._finish_reasons_set)
969-
invocation.cache_read_input_tokens = helper._cached_tokens
970-
invocation.attributes[
971-
"gen_ai.usage.reasoning.output_tokens"
972-
] = helper._thinking_tokens
973-
974-
if helper._content_recording_enabled and response.candidates:
975-
invocation.output_messages = to_output_messages(
976-
candidates=response.candidates
957+
if helper._content_recording_enabled:
958+
invocation.input_messages = to_input_messages(
959+
contents=transformers.t_contents(contents)
977960
)
978-
979-
invocation.stop()
980-
return response
981-
except Exception as error:
982-
invocation.fail(error)
983-
raise
961+
if system_content := _config_to_system_instruction(config):
962+
invocation.system_instruction = to_system_instructions(
963+
content=transformers.t_contents(system_content)[0]
964+
)
965+
candidates = []
966+
try:
967+
response = wrapped_func(
968+
self,
969+
model=model,
970+
contents=contents,
971+
config=helper.wrapped_config(config),
972+
**kwargs,
973+
)
974+
candidates.extend(response.candidates)
975+
helper._update_response(response)
976+
finally:
977+
print("Applying finish attributes here..")
978+
helper.apply_finish_attributes(invocation, candidates)
984979
else:
985980
with helper.start_span_as_current_span(
986981
model, "google.genai.Models.generate_content"
@@ -1043,53 +1038,38 @@ def instrumented_generate_content_stream(
10431038
)
10441039
)
10451040
if helper.experimental_sem_convs_enabled:
1046-
invocation = telemetry_handler.start_inference(
1041+
with telemetry_handler.inference(
10471042
provider=helper._genai_system,
10481043
request_model=model,
10491044
operation_name="generate_content",
1050-
)
1051-
invocation.attributes.update(extra_attributes)
1052-
tool_defs = helper._maybe_get_tool_definitions(config)
1053-
invocation.tool_definitions = tool_defs
1054-
1055-
if helper._content_recording_enabled:
1056-
invocation.input_messages = to_input_messages(
1057-
contents=transformers.t_contents(contents)
1058-
)
1059-
if system_content := _config_to_system_instruction(config):
1060-
invocation.system_instruction = to_system_instructions(
1061-
content=transformers.t_contents(system_content)[0]
1045+
) as invocation:
1046+
invocation.attributes.update(extra_attributes)
1047+
tool_defs = helper._maybe_get_tool_definitions(config)
1048+
invocation.tool_definitions = tool_defs
1049+
1050+
if helper._content_recording_enabled:
1051+
invocation.input_messages = to_input_messages(
1052+
contents=transformers.t_contents(contents)
10621053
)
1063-
candidates = []
1064-
try:
1065-
for resp in wrapped_func(
1066-
self,
1067-
model=model,
1068-
contents=contents,
1069-
config=helper.wrapped_config(config),
1070-
**kwargs,
1071-
):
1072-
helper._update_response(resp)
1073-
if resp.candidates:
1074-
candidates += resp.candidates
1054+
if system_content := _config_to_system_instruction(config):
1055+
invocation.system_instruction = to_system_instructions(
1056+
content=transformers.t_contents(system_content)[0]
1057+
)
1058+
candidates = []
1059+
try:
1060+
for resp in wrapped_func(
1061+
self,
1062+
model=model,
1063+
contents=contents,
1064+
config=helper.wrapped_config(config),
1065+
**kwargs,
1066+
):
1067+
helper._update_response(resp)
1068+
if resp.candidates:
1069+
candidates += resp.candidates
10751070
yield resp
1076-
except Exception as error:
1077-
invocation.fail(error)
1078-
raise
1079-
finally:
1080-
invocation.input_tokens = helper._input_tokens
1081-
invocation.output_tokens = helper._output_tokens
1082-
invocation.finish_reasons = sorted(helper._finish_reasons_set)
1083-
invocation.cache_read_input_tokens = helper._cached_tokens
1084-
invocation.attributes[
1085-
"gen_ai.usage.reasoning.output_tokens"
1086-
] = helper._thinking_tokens
1087-
1088-
if helper._content_recording_enabled and candidates:
1089-
invocation.output_messages = to_output_messages(
1090-
candidates=candidates
1091-
)
1092-
invocation.stop()
1071+
finally:
1072+
helper.apply_finish_attributes(invocation, candidates)
10931073
else:
10941074
with helper.start_span_as_current_span(
10951075
model, "google.genai.Models.generate_content_stream"
@@ -1152,53 +1132,39 @@ async def instrumented_generate_content(
11521132
)
11531133
)
11541134
if helper.experimental_sem_convs_enabled:
1155-
invocation = telemetry_handler.start_inference(
1135+
with telemetry_handler.inference(
11561136
provider=helper._genai_system,
11571137
request_model=model,
11581138
operation_name="generate_content",
1159-
)
1160-
invocation.attributes.update(extra_attributes)
1161-
invocation.tool_definitions = (
1162-
await helper._maybe_get_tool_definitions_async(config)
1163-
)
1164-
1165-
if helper._content_recording_enabled:
1166-
invocation.input_messages = to_input_messages(
1167-
contents=transformers.t_contents(contents)
1139+
) as invocation:
1140+
invocation.attributes.update(extra_attributes)
1141+
invocation.tool_definitions = (
1142+
await helper._maybe_get_tool_definitions_async(config)
11681143
)
1169-
if system_content := _config_to_system_instruction(config):
1170-
invocation.system_instruction = to_system_instructions(
1171-
content=transformers.t_contents(system_content)[0]
1172-
)
11731144

1174-
try:
1175-
response = await wrapped_func(
1176-
self,
1177-
model=model,
1178-
contents=contents,
1179-
config=helper.wrapped_config(config),
1180-
**kwargs,
1181-
)
1182-
helper._update_response(response)
1183-
1184-
invocation.input_tokens = helper._input_tokens
1185-
invocation.output_tokens = helper._output_tokens
1186-
invocation.finish_reasons = sorted(helper._finish_reasons_set)
1187-
invocation.cache_read_input_tokens = helper._cached_tokens
1188-
invocation.attributes[
1189-
"gen_ai.usage.reasoning.output_tokens"
1190-
] = helper._thinking_tokens
1191-
1192-
if helper._content_recording_enabled and response.candidates:
1193-
invocation.output_messages = to_output_messages(
1194-
candidates=response.candidates
1145+
if helper._content_recording_enabled:
1146+
invocation.input_messages = to_input_messages(
1147+
contents=transformers.t_contents(contents)
1148+
)
1149+
if system_content := _config_to_system_instruction(config):
1150+
invocation.system_instruction = to_system_instructions(
1151+
content=transformers.t_contents(system_content)[0]
1152+
)
1153+
candidates = []
1154+
try:
1155+
response = await wrapped_func(
1156+
self,
1157+
model=model,
1158+
contents=contents,
1159+
config=helper.wrapped_config(config),
1160+
**kwargs,
11951161
)
1162+
helper._update_response(response)
1163+
candidates += response.candidates
1164+
return response
1165+
finally:
1166+
helper.apply_finish_attributes(invocation, candidates)
11961167

1197-
invocation.stop()
1198-
return response
1199-
except Exception as error:
1200-
invocation.fail(error)
1201-
raise
12021168
else:
12031169
with helper.start_span_as_current_span(
12041170
model, "google.genai.AsyncModels.generate_content"
@@ -1262,68 +1228,41 @@ async def instrumented_generate_content_stream(
12621228
)
12631229
)
12641230
if helper.experimental_sem_convs_enabled:
1265-
invocation = telemetry_handler.start_inference(
1231+
with telemetry_handler.inference(
12661232
provider=helper._genai_system,
12671233
request_model=model,
12681234
operation_name="generate_content",
1269-
)
1270-
invocation.attributes.update(extra_attributes)
1271-
invocation.tool_definitions = (
1272-
await helper._maybe_get_tool_definitions_async(config)
1273-
)
1274-
invocation.input_messages = to_input_messages(
1275-
contents=transformers.t_contents(contents)
1276-
)
1277-
if system_content := _config_to_system_instruction(config):
1278-
invocation.system_instruction = to_system_instructions(
1279-
content=transformers.t_contents(system_content)[0]
1235+
) as invocation:
1236+
invocation.attributes.update(extra_attributes)
1237+
invocation.tool_definitions = (
1238+
await helper._maybe_get_tool_definitions_async(config)
12801239
)
1281-
1282-
try:
1283-
response_async_generator = await wrapped_func(
1284-
self,
1285-
model=model,
1286-
contents=contents,
1287-
config=helper.wrapped_config(config),
1288-
**kwargs,
1240+
invocation.input_messages = to_input_messages(
1241+
contents=transformers.t_contents(contents)
12891242
)
1243+
if system_content := _config_to_system_instruction(config):
1244+
invocation.system_instruction = to_system_instructions(
1245+
content=transformers.t_contents(system_content)[0]
1246+
)
1247+
candidates = []
12901248

1291-
async def _response_async_generator_wrapper():
1292-
candidates = []
1249+
async def _response_async_generator_wrapper(candidates):
12931250
try:
1294-
async for resp in response_async_generator:
1251+
async for resp in await wrapped_func(
1252+
self,
1253+
model=model,
1254+
contents=contents,
1255+
config=helper.wrapped_config(config),
1256+
**kwargs,
1257+
):
12951258
helper._update_response(resp)
12961259
if resp.candidates:
12971260
candidates += resp.candidates
12981261
yield resp
1299-
except Exception as e:
1300-
invocation.fail(e)
1301-
raise
13021262
finally:
1303-
invocation.input_tokens = helper._input_tokens
1304-
invocation.output_tokens = helper._output_tokens
1305-
invocation.finish_reasons = sorted(
1306-
helper._finish_reasons_set
1307-
)
1308-
invocation.cache_read_input_tokens = (
1309-
helper._cached_tokens
1310-
)
1311-
invocation.attributes[
1312-
"gen_ai.usage.reasoning.output_tokens"
1313-
] = helper._thinking_tokens
1314-
1315-
if candidates:
1316-
invocation.output_messages = to_output_messages(
1317-
candidates=candidates
1318-
)
1319-
1320-
invocation.stop()
1321-
1322-
return _response_async_generator_wrapper()
1263+
helper.apply_finish_attributes(invocation, candidates)
13231264

1324-
except Exception as error:
1325-
invocation.fail(error)
1326-
raise
1265+
return _response_async_generator_wrapper(candidates)
13271266
else:
13281267
with helper.start_span_as_current_span(
13291268
model,

0 commit comments

Comments
 (0)