diff --git a/instrumentation-genai/opentelemetry-instrumentation-anthropic/src/opentelemetry/instrumentation/anthropic/patch.py b/instrumentation-genai/opentelemetry-instrumentation-anthropic/src/opentelemetry/instrumentation/anthropic/patch.py index 5dd0132e10..b10d5de130 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-anthropic/src/opentelemetry/instrumentation/anthropic/patch.py +++ b/instrumentation-genai/opentelemetry-instrumentation-anthropic/src/opentelemetry/instrumentation/anthropic/patch.py @@ -114,7 +114,7 @@ def _create_invocation( ) server_address, server_port = get_server_address_and_port(instance) - invocation = handler.start_inference( + invocation = handler.inference( provider=ANTHROPIC, request_model=request_model, server_address=server_address, diff --git a/util/opentelemetry-util-genai/CHANGELOG.md b/util/opentelemetry-util-genai/CHANGELOG.md index 97dcb93090..c21640b4de 100644 --- a/util/opentelemetry-util-genai/CHANGELOG.md +++ b/util/opentelemetry-util-genai/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Apply attribute for sampling on instantiation of all invocation types. ([#4553](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4553)) - Minor code cleanup and changes in preparation of moving google's GenAI instrumentation - library to use this util library ([#4556](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4556)) + library to use this util library ([#4556](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4556), [#4570](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4570)) ## Version 0.4b0 (2026-05-01) diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_invocation.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_invocation.py index f5f6973111..464297350f 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_invocation.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_invocation.py @@ -5,12 +5,12 @@ import timeit from abc import ABC, abstractmethod -from contextlib import contextmanager from contextvars import Token from dataclasses import asdict -from typing import TYPE_CHECKING, Any, Iterator, Sequence +from types import TracebackType +from typing import TYPE_CHECKING, Any, Sequence -from typing_extensions import Self, TypeAlias +from typing_extensions import TypeAlias from opentelemetry._logs import Logger, LogRecord from opentelemetry.context import Context, attach, detach @@ -138,6 +138,20 @@ def _call_completion_hook( log_record=log_record, ) + def __enter__(self): + return self + + def __exit__( + self, + type_: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + if value: + self.fail(value) + raise + self.stop() + @abstractmethod def _apply_finish(self, error: Error | None = None) -> None: """Apply finish telemetry (attributes, metrics, events).""" @@ -165,16 +179,6 @@ def fail(self, error: Error | BaseException) -> None: error = Error(type=type(error), message=str(error)) self._finish(error) - @contextmanager - def _managed(self) -> Iterator[Self]: - """Context manager that calls stop() on success or fail() on exception.""" - try: - yield self - except Exception as exc: - self.fail(exc) - raise - self.stop() - def get_content_attributes( *, diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_tool_invocation.py 
b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_tool_invocation.py index 2aa651408e..b8eea643b4 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_tool_invocation.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/_tool_invocation.py @@ -13,6 +13,7 @@ from opentelemetry.util.genai._invocation import Error, GenAIInvocation from opentelemetry.util.genai.completion_hook import CompletionHook from opentelemetry.util.genai.metrics import InvocationMetricsRecorder +from opentelemetry.util.genai.utils import gen_ai_json_dumps class ToolInvocation(GenAIInvocation): @@ -47,6 +48,7 @@ def __init__( tool_call_id: str | None = None, tool_type: str | None = None, tool_description: str | None = None, + tool_result: Any = None, ) -> None: """Use handler.start_tool(name) or handler.tool(name) instead of calling this directly.""" _operation_name = GenAI.GenAiOperationNameValues.EXECUTE_TOOL.value @@ -63,7 +65,7 @@ def __init__( self.tool_call_id = tool_call_id self.tool_type = tool_type self.tool_description = tool_description - self.tool_result: Any = None + self.tool_result: Any = tool_result self._start(self._get_base_attributes()) def _get_base_attributes(self) -> dict[str, Any]: @@ -94,7 +96,18 @@ def _apply_finish(self, error: Error | None = None) -> None: (GenAI.GEN_AI_TOOL_CALL_ID, self.tool_call_id), (GenAI.GEN_AI_TOOL_TYPE, self.tool_type), (GenAI.GEN_AI_TOOL_DESCRIPTION, self.tool_description), - (GenAI.GEN_AI_TOOL_CALL_ARGUMENTS, self.arguments), + ( + GenAI.GEN_AI_TOOL_CALL_ARGUMENTS, + gen_ai_json_dumps(self.arguments) + if self.arguments is not None + else None, + ), + ( + GenAI.GEN_AI_TOOL_CALL_RESULT, + gen_ai_json_dumps(self.tool_result) + if self.tool_result is not None + else None, + ), ) attributes: dict[str, Any] = { GenAI.GEN_AI_OPERATION_NAME: self._operation_name, diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py index 9839e041b9..2d5973039b 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py @@ -17,7 +17,7 @@ handler = get_telemetry_handler() # Factory method: construct and start in one call, then stop or fail. - invocation = handler.start_inference("my-provider", request_model="my-model") + invocation = handler.inference("my-provider", request_model="my-model") invocation.input_messages = [...] invocation.temperature = 0.7 try: @@ -38,7 +38,6 @@ from __future__ import annotations import os -from contextlib import AbstractContextManager from opentelemetry._logs import ( LoggerProvider, @@ -132,39 +131,13 @@ def should_capture_content(self) -> bool: """ return self._capture_content - # New-style factory methods: construct + start in one call, handler stored on invocation - - def start_inference( - self, - provider: str, - *, - request_model: str | None = None, - server_address: str | None = None, - server_port: int | None = None, - operation_name: str | None = None, - ) -> InferenceInvocation: - """Create and start an LLM inference invocation. - - Set remaining attributes (input_messages, temperature, etc.) on the - returned invocation, then call invocation.stop() or invocation.fail(). 
- """ - return InferenceInvocation( - self._tracer, - self._metrics_recorder, - self._logger, - self._completion_hook, - provider, - request_model=request_model, - server_address=server_address, - server_port=server_port, - operation_name=operation_name, - ) + # New-style factory methods: optionally construct + start in one call, handler stored on invocation def start_llm(self, invocation: LLMInvocation) -> LLMInvocation: """Start an LLM invocation. .. deprecated:: - Use ``handler.start_inference()`` instead. + Use ``handler.inference()`` instead. """ invocation._start_with_handler( self._tracer, @@ -174,79 +147,11 @@ def start_llm(self, invocation: LLMInvocation) -> LLMInvocation: ) return invocation - def start_embedding( - self, - provider: str, - *, - request_model: str | None = None, - server_address: str | None = None, - server_port: int | None = None, - ) -> EmbeddingInvocation: - """Create and start an Embedding invocation. - - Set remaining attributes (encoding_formats, etc.) on the returned - invocation, then call invocation.stop() or invocation.fail(). - """ - return EmbeddingInvocation( - self._tracer, - self._metrics_recorder, - self._logger, - self._completion_hook, - provider, - request_model=request_model, - server_address=server_address, - server_port=server_port, - ) - - def start_tool( - self, - name: str, - *, - arguments: object = None, - tool_call_id: str | None = None, - tool_type: str | None = None, - tool_description: str | None = None, - ) -> ToolInvocation: - """Create and start a tool invocation. - - Set tool_result on the returned invocation when done, then call - invocation.stop() or invocation.fail(). - """ - return ToolInvocation( - self._tracer, - self._metrics_recorder, - self._logger, - self._completion_hook, - name, - arguments=arguments, - tool_call_id=tool_call_id, - tool_type=tool_type, - tool_description=tool_description, - ) - - def start_workflow( - self, - *, - name: str | None = None, - ) -> WorkflowInvocation: - """Create and start a workflow invocation. - - Set remaining attributes on the returned invocation, then call - invocation.stop() or invocation.fail(). - """ - return WorkflowInvocation( - self._tracer, - self._metrics_recorder, - self._logger, - self._completion_hook, - name, - ) - def stop_llm(self, invocation: LLMInvocation) -> LLMInvocation: # pylint: disable=no-self-use """Finalize an LLM invocation successfully and end its span. .. deprecated:: - Use ``handler.start_inference()`` and then ``inference.stop()`` instead. + Use ``handler.inference()`` and then ``inference.stop()`` instead. """ invocation._sync_to_invocation() if invocation._inference_invocation is not None: @@ -261,7 +166,7 @@ def fail_llm( # pylint: disable=no-self-use """Fail an LLM invocation and end its span with error status. .. deprecated:: - Use ``handler.start_inference()`` and then ``inference.fail()`` instead. + Use ``handler.inference()`` and then ``inference.fail()`` instead. """ invocation._sync_to_invocation() if invocation._inference_invocation is not None: @@ -275,21 +180,28 @@ def inference( request_model: str | None = None, server_address: str | None = None, server_port: int | None = None, - ) -> AbstractContextManager[InferenceInvocation]: - """Context manager for LLM inference invocations. + operation_name: str | None = None, + ) -> InferenceInvocation: + """This function can be used as a ContextManager Only set data attributes on the invocation object, do not modify the span or context. - Starts the span on entry. 
On normal exit, finalizes the invocation and ends the span. + + Starts the span when called. If used as a context manager, on exit it finalizes the invocation and ends the span. If an exception occurs inside the context, marks the span as error, ends it, and re-raises the original exception. """ - return self.start_inference( - provider=provider, + return InferenceInvocation( + self._tracer, + self._metrics_recorder, + self._logger, + self._completion_hook, + provider, request_model=request_model, server_address=server_address, server_port=server_port, - )._managed() + operation_name=operation_name, + ) def embedding( self, @@ -298,7 +210,7 @@ def embedding( request_model: str | None = None, server_address: str | None = None, server_port: int | None = None, - ) -> AbstractContextManager[EmbeddingInvocation]: + ) -> EmbeddingInvocation: """Context manager for Embedding invocations. Only set data attributes on the invocation object, do not modify the span or context. @@ -307,12 +219,16 @@ def embedding( If an exception occurs inside the context, marks the span as error, ends it, and re-raises the original exception. """ - return self.start_embedding( - provider=provider, + return EmbeddingInvocation( + self._tracer, + self._metrics_recorder, + self._logger, + self._completion_hook, + provider, request_model=request_model, server_address=server_address, server_port=server_port, - )._managed() + ) def tool( self, @@ -322,7 +238,7 @@ def tool( tool_call_id: str | None = None, tool_type: str | None = None, tool_description: str | None = None, - ) -> AbstractContextManager[ToolInvocation]: + ) -> ToolInvocation: """Context manager for Tool invocations. Only set data attributes on the invocation object, do not modify the span or context. @@ -331,27 +247,34 @@ def tool( If an exception occurs inside the context, marks the span as error, ends it, and re-raises the original exception. """ - return self.start_tool( + return ToolInvocation( + self._tracer, + self._metrics_recorder, + self._logger, + self._completion_hook, name, arguments=arguments, tool_call_id=tool_call_id, tool_type=tool_type, tool_description=tool_description, - )._managed() + ) - def start_invoke_local_agent( + def invoke_local_agent( self, provider: str, *, request_model: str | None = None, agent_name: str | None = None, ) -> AgentInvocation: - """Create and start a local agent invocation (INTERNAL span kind). + """Context manager for local agent invocations (INTERNAL span kind). Use for agents running within the same process (e.g. LangChain, CrewAI). - Set remaining attributes (agent_name, etc.) on the returned invocation, - then call invocation.stop() or invocation.fail(). + Only set data attributes on the invocation object, do not modify the span or context. + + Starts the span on entry. On normal exit, finalizes the invocation and ends the span. + If an exception occurs inside the context, marks the span as error, ends it, and + re-raises the original exception. """ return AgentInvocation( self._tracer, @@ -364,7 +287,7 @@ def start_invoke_local_agent( agent_name=agent_name, ) - def start_invoke_remote_agent( + def invoke_remote_agent( self, provider: str, *, @@ -373,12 +296,15 @@ def start_invoke_remote_agent( server_port: int | None = None, agent_name: str | None = None, ) -> AgentInvocation: - """Create and start a remote agent invocation (CLIENT span kind). + """Context manager for remote agent invocations (CLIENT span kind). Use for agents invoked over a remote service (e.g. OpenAI Assistants, AWS Bedrock). 
- Set remaining attributes (agent_name, etc.) on the returned invocation, - then call invocation.stop() or invocation.fail(). + Only set data attributes on the invocation object, do not modify the span or context. + + Starts the span on entry. On normal exit, finalizes the invocation and ends the span. + If an exception occurs inside the context, marks the span as error, ends it, and + re-raises the original exception. """ return AgentInvocation( self._tracer, @@ -393,60 +319,10 @@ def start_invoke_remote_agent( server_port=server_port, ) - def invoke_local_agent( - self, - provider: str, - *, - request_model: str | None = None, - agent_name: str | None = None, - ) -> AbstractContextManager[AgentInvocation]: - """Context manager for local agent invocations (INTERNAL span kind). - - Use for agents running within the same process (e.g. LangChain, CrewAI). - - Only set data attributes on the invocation object, do not modify the span or context. - - Starts the span on entry. On normal exit, finalizes the invocation and ends the span. - If an exception occurs inside the context, marks the span as error, ends it, and - re-raises the original exception. - """ - return self.start_invoke_local_agent( - provider, - request_model=request_model, - agent_name=agent_name, - )._managed() - - def invoke_remote_agent( - self, - provider: str, - *, - request_model: str | None = None, - server_address: str | None = None, - server_port: int | None = None, - agent_name: str | None = None, - ) -> AbstractContextManager[AgentInvocation]: - """Context manager for remote agent invocations (CLIENT span kind). - - Use for agents invoked over a remote service (e.g. OpenAI Assistants, AWS Bedrock). - - Only set data attributes on the invocation object, do not modify the span or context. - - Starts the span on entry. On normal exit, finalizes the invocation and ends the span. - If an exception occurs inside the context, marks the span as error, ends it, and - re-raises the original exception. - """ - return self.start_invoke_remote_agent( - provider, - request_model=request_model, - agent_name=agent_name, - server_address=server_address, - server_port=server_port, - )._managed() - def workflow( self, name: str | None = None, - ) -> AbstractContextManager[WorkflowInvocation]: + ) -> WorkflowInvocation: """Context manager for Workflow invocations. Only set data attributes on the invocation object, do not modify the span or context. @@ -455,7 +331,13 @@ def workflow( If an exception occurs inside the context, marks the span as error, ends it, and re-raises the original exception. 
""" - return self.start_workflow(name=name)._managed() + return WorkflowInvocation( + self._tracer, + self._metrics_recorder, + self._logger, + self._completion_hook, + name, + ) def get_telemetry_handler( diff --git a/util/opentelemetry-util-genai/tests/test_handler_agent.py b/util/opentelemetry-util-genai/tests/test_handler_agent.py index 9f1179e1b5..aa4812d304 100644 --- a/util/opentelemetry-util-genai/tests/test_handler_agent.py +++ b/util/opentelemetry-util-genai/tests/test_handler_agent.py @@ -39,7 +39,7 @@ def setUp(self): self.handler = TelemetryHandler(tracer_provider=tracer_provider) def test_start_stop_creates_span(self): - invocation = self.handler.start_invoke_local_agent( + invocation = self.handler.invoke_local_agent( "openai", request_model="gpt-4", agent_name="Math Tutor", @@ -56,7 +56,7 @@ def test_start_stop_creates_span(self): assert span.attributes[GenAI.GEN_AI_REQUEST_MODEL] == "gpt-4" def test_span_kind_internal(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.stop() assert ( self.span_exporter.get_finished_spans()[0].kind @@ -64,14 +64,14 @@ def test_span_kind_internal(self): ) def test_no_server_attributes(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.stop() attrs = self.span_exporter.get_finished_spans()[0].attributes assert server_attributes.SERVER_ADDRESS not in attrs assert server_attributes.SERVER_PORT not in attrs def test_all_attributes(self): - invocation = self.handler.start_invoke_local_agent( + invocation = self.handler.invoke_local_agent( "openai", request_model="gpt-4", ) @@ -116,7 +116,7 @@ def test_all_attributes(self): assert attrs[GenAI.GEN_AI_RESPONSE_FINISH_REASONS] == ("stop",) def test_finish_reasons_multiple(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.finish_reasons = ["stop", "length"] invocation.stop() attrs = self.span_exporter.get_finished_spans()[0].attributes @@ -126,7 +126,7 @@ def test_finish_reasons_multiple(self): ) def test_finish_reasons_empty_list_omitted(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.finish_reasons = [] invocation.stop() attrs = self.span_exporter.get_finished_spans()[0].attributes @@ -134,7 +134,7 @@ def test_finish_reasons_empty_list_omitted(self): assert GenAI.GEN_AI_RESPONSE_FINISH_REASONS not in attrs def test_cache_token_attributes(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.input_tokens = 100 invocation.cache_creation_input_tokens = 25 invocation.cache_read_input_tokens = 50 @@ -146,7 +146,7 @@ def test_cache_token_attributes(self): assert attrs[GenAI.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS] == 50 def test_fail_sets_error_status(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.fail(RuntimeError("agent crashed")) span = self.span_exporter.get_finished_spans()[0] @@ -183,7 +183,7 @@ def test_context_manager_default_invocation(self): assert len(self.span_exporter.get_finished_spans()) == 1 def test_default_values(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.stop() assert 
invocation._operation_name == "invoke_agent" assert invocation.agent_name is None @@ -198,7 +198,7 @@ def test_default_values(self): assert not invocation.attributes def test_with_messages(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.input_messages = [ InputMessage(role="user", parts=[Text(content="Hello")]) ] @@ -214,7 +214,7 @@ def test_with_messages(self): assert invocation.input_messages[0].role == "user" def test_custom_attributes(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.attributes["custom.key"] = "custom_value" invocation.stop() spans = self.span_exporter.get_finished_spans() @@ -226,7 +226,7 @@ def test_tool_definitions_type(self): description="Get the weather", parameters={"type": "object", "properties": {}}, ) - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.tool_definitions = [tool] invocation.stop() assert len(invocation.tool_definitions) == 1 @@ -234,23 +234,23 @@ def test_tool_definitions_type(self): assert invocation.tool_definitions[0].type == "function" def test_default_lists_are_independent(self): - inv1 = self.handler.start_invoke_local_agent("openai") - inv2 = self.handler.start_invoke_local_agent("openai") + inv1 = self.handler.invoke_local_agent("openai") + inv2 = self.handler.invoke_local_agent("openai") inv1.input_messages.append(InputMessage(role="user", parts=[])) assert len(inv2.input_messages) == 0 inv2.stop() inv1.stop() def test_default_attributes_are_independent(self): - inv1 = self.handler.start_invoke_local_agent("openai") - inv2 = self.handler.start_invoke_local_agent("openai") + inv1 = self.handler.invoke_local_agent("openai") + inv2 = self.handler.invoke_local_agent("openai") inv1.attributes["foo"] = "bar" assert "foo" not in inv2.attributes inv2.stop() inv1.stop() def test_agent_name_set_after_construction(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.agent_name = "Named Agent" invocation.stop() span = self.span_exporter.get_finished_spans()[0] @@ -260,7 +260,7 @@ def test_agent_name_set_after_construction(self): assert span.attributes[GenAI.GEN_AI_AGENT_NAME] == "Named Agent" def test_agent_name_passed_at_construction(self): - invocation = self.handler.start_invoke_local_agent( + invocation = self.handler.invoke_local_agent( "openai", agent_name="Constructor Agent" ) invocation.stop() @@ -293,7 +293,7 @@ def get_description(self): ) handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_invoke_local_agent( + invocation = handler.invoke_local_agent( "openai", agent_name="Sampler Agent" ) invocation.stop() @@ -325,7 +325,7 @@ def get_description(self): ) handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_invoke_local_agent("openai") + invocation = handler.invoke_local_agent("openai") invocation.stop() assert GenAI.GEN_AI_AGENT_NAME not in captured_attributes @@ -349,7 +349,7 @@ def setUp(self): return_value=True, ) def test_system_instruction_on_span(self, _mock_exp, _mock_cap): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.system_instruction = [ Text(content="You are a helpful assistant."), ] @@ -372,7 +372,7 @@ def 
test_tool_definitions_on_span(self, _mock_exp, _mock_cap): description="Get the weather", parameters={"type": "object", "properties": {}}, ) - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.tool_definitions = [tool] invocation.stop() @@ -388,7 +388,7 @@ def test_tool_definitions_on_span(self, _mock_exp, _mock_cap): return_value=True, ) def test_messages_on_span(self, _mock_exp, _mock_cap): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.input_messages = [ InputMessage(role="user", parts=[Text(content="Hello")]) ] @@ -406,7 +406,7 @@ def test_messages_on_span(self, _mock_exp, _mock_cap): assert GenAI.GEN_AI_OUTPUT_MESSAGES in attrs def test_content_not_on_span_by_default(self): - invocation = self.handler.start_invoke_local_agent("openai") + invocation = self.handler.invoke_local_agent("openai") invocation.system_instruction = [ Text(content="You are a helpful assistant."), ] @@ -430,14 +430,14 @@ def setUp(self): self.handler = TelemetryHandler(tracer_provider=tracer_provider) def test_span_kind_client(self): - invocation = self.handler.start_invoke_remote_agent("openai") + invocation = self.handler.invoke_remote_agent("openai") invocation.stop() assert ( self.span_exporter.get_finished_spans()[0].kind == SpanKind.CLIENT ) def test_server_attributes(self): - invocation = self.handler.start_invoke_remote_agent( + invocation = self.handler.invoke_remote_agent( "openai", server_address="api.openai.com", server_port=443, @@ -448,7 +448,7 @@ def test_server_attributes(self): assert attrs[server_attributes.SERVER_PORT] == 443 def test_all_attributes(self): - invocation = self.handler.start_invoke_remote_agent( + invocation = self.handler.invoke_remote_agent( "openai", request_model="gpt-4", server_address="api.openai.com", @@ -472,7 +472,7 @@ def test_all_attributes(self): assert attrs[GenAI.GEN_AI_REQUEST_MODEL] == "gpt-4" def test_fail_sets_error_status(self): - invocation = self.handler.start_invoke_remote_agent("openai") + invocation = self.handler.invoke_remote_agent("openai") invocation.fail(RuntimeError("remote agent crashed")) span = self.span_exporter.get_finished_spans()[0] @@ -532,7 +532,7 @@ def get_description(self): ) handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_invoke_remote_agent( + invocation = handler.invoke_remote_agent( "test-provider", request_model="agent-model", agent_name="Math Tutor", @@ -563,7 +563,7 @@ def test_local_agent_records_duration_and_tokens(self) -> None: meter_provider=self.meter_provider, ) with patch("timeit.default_timer", return_value=1000.0): - invocation = handler.start_invoke_local_agent( + invocation = handler.invoke_local_agent( "prov", request_model="model" ) invocation.input_tokens = 5 @@ -612,7 +612,7 @@ def test_remote_agent_records_duration_with_server_attrs(self) -> None: tracer_provider=self.tracer_provider, meter_provider=self.meter_provider, ) - invocation = handler.start_invoke_remote_agent( + invocation = handler.invoke_remote_agent( "prov", request_model="model", server_address="agent.example.com", @@ -635,7 +635,7 @@ def test_fail_agent_records_error_metric(self) -> None: meter_provider=self.meter_provider, ) with patch("timeit.default_timer", return_value=2000.0): - invocation = handler.start_invoke_local_agent( + invocation = handler.invoke_local_agent( "", request_model="err-model" ) invocation.input_tokens = 11 diff --git 
a/util/opentelemetry-util-genai/tests/test_handler_completion_hook.py b/util/opentelemetry-util-genai/tests/test_handler_completion_hook.py index 8ed714b04f..19bd874a6d 100644 --- a/util/opentelemetry-util-genai/tests/test_handler_completion_hook.py +++ b/util/opentelemetry-util-genai/tests/test_handler_completion_hook.py @@ -79,7 +79,7 @@ def test_hook_called_on_stop(self): ) ] - invocation = handler.start_inference("openai", request_model="gpt-4o") + invocation = handler.inference("openai", request_model="gpt-4o") invocation.input_messages = input_messages invocation.output_messages = output_messages invocation.system_instruction = system_instruction @@ -102,7 +102,7 @@ def test_hook_called_on_fail(self): InputMessage(role="user", parts=[Text(content="hello")]) ] - invocation = handler.start_inference("openai", request_model="gpt-4o") + invocation = handler.inference("openai", request_model="gpt-4o") invocation.input_messages = input_messages invocation.fail(ValueError("boom")) @@ -114,7 +114,7 @@ def test_hook_called_on_fail(self): def test_hook_not_called_when_not_set(self): # No hook — stop should not raise handler = self._make_handler() - handler.start_inference("openai", request_model="gpt-4o").stop() + handler.inference("openai", request_model="gpt-4o").stop() def test_log_record_is_none_when_events_disabled(self): # Default env: no experimental mode, so log_record should be None. @@ -123,7 +123,7 @@ def test_log_record_is_none_when_events_disabled(self): hook = MagicMock() handler = self._make_handler(hook) - handler.start_inference("openai", request_model="gpt-4o").stop() + handler.inference("openai", request_model="gpt-4o").stop() kwargs = hook.on_completion.call_args.kwargs self.assertIsNone(kwargs["log_record"]) @@ -137,7 +137,7 @@ def test_log_record_passed_when_events_enabled(self): hook = MagicMock() handler = self._make_handler(hook) - handler.start_inference("openai", request_model="gpt-4o").stop() + handler.inference("openai", request_model="gpt-4o").stop() kwargs = hook.on_completion.call_args.kwargs self.assertIsNotNone(kwargs["log_record"]) @@ -162,7 +162,7 @@ def stamp_ref(*, log_record, **kwargs): hook = MagicMock(on_completion=stamp_ref) handler = self._make_handler(hook) - handler.start_inference("openai", request_model="gpt-4o").stop() + handler.inference("openai", request_model="gpt-4o").stop() # The record the hook stamped is the same one that would be emitted self.assertIsNotNone(stamped_record) @@ -281,7 +281,7 @@ def test_workflow_hook_called_on_stop_with_messages(self): ) ] - invocation = handler.start_workflow(name="my-workflow") + invocation = handler.workflow(name="my-workflow") invocation.input_messages = input_messages invocation.output_messages = output_messages invocation.stop() @@ -300,7 +300,7 @@ def test_workflow_hook_called_on_fail(self): hook = MagicMock() handler = self._make_handler(hook) - invocation = handler.start_workflow(name="my-workflow") + invocation = handler.workflow(name="my-workflow") invocation.input_messages = [ InputMessage(role="user", parts=[Text(content="hello")]) ] @@ -314,7 +314,7 @@ def test_workflow_hook_called_with_empty_messages_when_none_set(self): hook = MagicMock() handler = self._make_handler(hook) - handler.start_workflow(name="my-workflow").stop() + handler.workflow(name="my-workflow").stop() hook.on_completion.assert_called_once() kwargs = hook.on_completion.call_args.kwargs @@ -344,7 +344,7 @@ def test_local_agent_hook_called_on_stop_with_messages(self): ) ] - invocation = handler.start_invoke_local_agent( + 
invocation = handler.invoke_local_agent( "openai", request_model="gpt-4" ) invocation.agent_name = "Math Tutor" @@ -367,7 +367,7 @@ def test_local_agent_hook_called_on_fail(self): hook = MagicMock() handler = self._make_handler(hook) - invocation = handler.start_invoke_local_agent( + invocation = handler.invoke_local_agent( "openai", request_model="gpt-4" ) invocation.input_messages = [ @@ -402,7 +402,7 @@ def test_remote_agent_hook_called_on_stop_with_messages(self): ) ] - invocation = handler.start_invoke_remote_agent( + invocation = handler.invoke_remote_agent( "openai", request_model="gpt-4", server_address="api.openai.com", @@ -425,7 +425,7 @@ def test_remote_agent_hook_called_on_fail(self): hook = MagicMock() handler = self._make_handler(hook) - invocation = handler.start_invoke_remote_agent("openai") + invocation = handler.invoke_remote_agent("openai") invocation.fail(RuntimeError("remote agent crashed")) hook.on_completion.assert_called_once() @@ -436,8 +436,8 @@ def test_agent_hook_called_with_empty_messages_when_none_set(self): hook = MagicMock() handler = self._make_handler(hook) - handler.start_invoke_local_agent("openai").stop() - handler.start_invoke_remote_agent("openai").stop() + handler.invoke_local_agent("openai").stop() + handler.invoke_remote_agent("openai").stop() for call in hook.on_completion.call_args_list: self.assertEqual(call.kwargs["inputs"], []) @@ -448,5 +448,5 @@ def test_agent_hook_called_with_empty_messages_when_none_set(self): def test_agent_hook_not_called_when_not_set(self): # No hook — stop should not raise handler = self._make_handler() - handler.start_invoke_local_agent("openai").stop() - handler.start_invoke_remote_agent("openai").stop() + handler.invoke_local_agent("openai").stop() + handler.invoke_remote_agent("openai").stop() diff --git a/util/opentelemetry-util-genai/tests/test_handler_metrics.py b/util/opentelemetry-util-genai/tests/test_handler_metrics.py index 5e2a58fd3c..37a13cdd34 100644 --- a/util/opentelemetry-util-genai/tests/test_handler_metrics.py +++ b/util/opentelemetry-util-genai/tests/test_handler_metrics.py @@ -27,7 +27,7 @@ def test_stop_llm_records_duration_and_tokens(self) -> None: ) # Patch default_timer during start to ensure monotonic_start_s with patch("timeit.default_timer", return_value=1000.0): - invocation = handler.start_inference("prov", request_model="model") + invocation = handler.inference("prov", request_model="model") invocation.input_tokens = 5 invocation.output_tokens = 7 @@ -82,7 +82,7 @@ def test_stop_llm_records_duration_and_tokens_with_additional_attributes( meter_provider=self.meter_provider, ) - invocation = handler.start_inference( + invocation = handler.inference( "prov", request_model="model", server_address="custom.server.com", @@ -121,7 +121,7 @@ def test_fail_llm_records_error_and_available_tokens(self) -> None: ) # Patch default_timer during start to ensure monotonic_start_s with patch("timeit.default_timer", return_value=2000.0): - invocation = handler.start_inference("", request_model="err-model") + invocation = handler.inference("", request_model="err-model") invocation.input_tokens = 11 error = Error(message="boom", type=ValueError) @@ -193,7 +193,7 @@ def test_stop_embedding_records_duration_and_tokens(self) -> None: ) # Patch default_timer during start to ensure monotonic_start_s with patch("timeit.default_timer", return_value=1000.0): - invocation = handler.start_embedding( + invocation = handler.embedding( "embed-prov", request_model="embed-model" ) invocation.input_tokens = 100 @@ -242,7 
+242,7 @@ def test_stop_embedding_records_duration_with_additional_attributes( tracer_provider=self.tracer_provider, meter_provider=self.meter_provider, ) - invocation = handler.start_embedding( + invocation = handler.embedding( "embed-prov", request_model="embed-model", server_address="embed.server.com", @@ -279,7 +279,7 @@ def test_fail_embedding_records_error_and_duration(self) -> None: meter_provider=self.meter_provider, ) with patch("timeit.default_timer", return_value=3000.0): - invocation = handler.start_embedding( + invocation = handler.embedding( "embed-prov", request_model="embed-err-model" ) @@ -313,7 +313,7 @@ def test_stop_embedding_without_tokens(self) -> None: tracer_provider=self.tracer_provider, meter_provider=self.meter_provider, ) - invocation = handler.start_embedding( + invocation = handler.embedding( "embed-prov", request_model="embed-model" ) # input_tokens is not set @@ -343,7 +343,7 @@ def test_stop_tool_records_duration(self) -> None: meter_provider=self.meter_provider, ) with patch("timeit.default_timer", return_value=1000.0): - invocation = handler.start_tool("get_weather") + invocation = handler.tool("get_weather") invocation.metric_attributes = {"custom.key": "custom_value"} with patch("timeit.default_timer", return_value=1002.5): @@ -371,7 +371,7 @@ def test_fail_tool_records_duration_with_error(self) -> None: meter_provider=self.meter_provider, ) with patch("timeit.default_timer", return_value=500.0): - invocation = handler.start_tool("failing_tool") + invocation = handler.tool("failing_tool") error = Error(message="Tool execution failed", type=RuntimeError) with patch("timeit.default_timer", return_value=501.5): diff --git a/util/opentelemetry-util-genai/tests/test_handler_workflow.py b/util/opentelemetry-util-genai/tests/test_handler_workflow.py index f7fbb863a5..a304f5b820 100644 --- a/util/opentelemetry-util-genai/tests/test_handler_workflow.py +++ b/util/opentelemetry-util-genai/tests/test_handler_workflow.py @@ -52,12 +52,12 @@ class TelemetryHandlerWorkflowTest(_WorkflowTestBase): # ------------------------------------------------------------------ def test_start_workflow_creates_span(self) -> None: - invocation = self.handler.start_workflow(name="my_workflow") + invocation = self.handler.workflow(name="my_workflow") self.assertIsNot(invocation.span, INVALID_SPAN) invocation.stop() def test_start_workflow_span_name(self) -> None: - invocation = self.handler.start_workflow(name="my_pipeline") + invocation = self.handler.workflow(name="my_pipeline") invocation.stop() spans = self._get_finished_spans() @@ -65,7 +65,7 @@ def test_start_workflow_span_name(self) -> None: self.assertEqual(spans[0].name, "invoke_workflow my_pipeline") def test_start_workflow_span_name_without_name(self) -> None: - invocation = self.handler.start_workflow(name=None) + invocation = self.handler.workflow(name=None) invocation.stop() spans = self._get_finished_spans() @@ -73,7 +73,7 @@ def test_start_workflow_span_name_without_name(self) -> None: self.assertEqual(spans[0].name, "invoke_workflow") def test_start_workflow_span_kind_is_internal(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") invocation.stop() spans = self._get_finished_spans() @@ -82,7 +82,7 @@ def test_start_workflow_span_kind_is_internal(self) -> None: def test_start_workflow_records_monotonic_start(self) -> None: with patch("timeit.default_timer", return_value=500.0): - invocation = self.handler.start_workflow(name="wf") + invocation = 
self.handler.workflow(name="wf") self.assertEqual(invocation._monotonic_start_s, 500.0) invocation.stop() @@ -91,14 +91,14 @@ def test_start_workflow_records_monotonic_start(self) -> None: # ------------------------------------------------------------------ def test_stop_workflow_ends_span(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") invocation.stop() spans = self._get_finished_spans() self.assertEqual(len(spans), 1) def test_stop_workflow_sets_operation_name_attribute(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") invocation.stop() spans = self._get_finished_spans() @@ -108,7 +108,7 @@ def test_stop_workflow_sets_operation_name_attribute(self) -> None: ) def test_stop_workflow_sets_custom_attributes(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") invocation.attributes["custom.key"] = "custom_value" invocation.stop() @@ -116,7 +116,7 @@ def test_stop_workflow_sets_custom_attributes(self) -> None: self.assertEqual(spans[0].attributes["custom.key"], "custom_value") def test_stop_workflow_returns_invocation(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") invocation.stop() spans = self._get_finished_spans() self.assertEqual(len(spans), 1) @@ -126,7 +126,7 @@ def test_stop_workflow_returns_invocation(self) -> None: # ------------------------------------------------------------------ def test_fail_workflow_sets_error_status(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") error = Error(message="something broke", type=RuntimeError) invocation.fail(error) @@ -136,7 +136,7 @@ def test_fail_workflow_sets_error_status(self) -> None: self.assertEqual(spans[0].status.description, "something broke") def test_fail_workflow_sets_error_type_attribute(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") error = Error(message="bad", type=ValueError) invocation.fail(error) @@ -144,7 +144,7 @@ def test_fail_workflow_sets_error_type_attribute(self) -> None: self.assertEqual(spans[0].attributes["error.type"], "ValueError") def test_fail_workflow_sets_operation_name_attribute(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") error = Error(message="fail", type=TypeError) invocation.fail(error) @@ -155,7 +155,7 @@ def test_fail_workflow_sets_operation_name_attribute(self) -> None: ) def test_fail_workflow_ends_span(self) -> None: - invocation = self.handler.start_workflow(name="wf") + invocation = self.handler.workflow(name="wf") invocation.fail(Error(message="err", type=RuntimeError)) spans = self._get_finished_spans() self.assertEqual(len(spans), 1) @@ -211,7 +211,7 @@ def get_description(self): ) handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_workflow(name="my-workflow") + invocation = handler.workflow(name="my-workflow") invocation.stop() self.assertEqual( diff --git a/util/opentelemetry-util-genai/tests/test_toolcall.py b/util/opentelemetry-util-genai/tests/test_toolcall.py index 54648f061a..0f65e4411d 100644 --- a/util/opentelemetry-util-genai/tests/test_toolcall.py +++ b/util/opentelemetry-util-genai/tests/test_toolcall.py @@ -40,7 +40,7 @@ def test_toolcallrequest_is_message_part(): def 
test_toolcall_inherits_from_genaiinvocation(): """ToolInvocation inherits from GenAIInvocation for lifecycle management""" handler = _make_handler() - tc = handler.start_tool("get_weather", arguments={"city": "Paris"}) + tc = handler.tool("get_weather", arguments={"city": "Paris"}) assert isinstance(tc, GenAIInvocation) assert not isinstance(tc, ToolCallRequest) tc.stop() @@ -49,7 +49,7 @@ def test_toolcall_inherits_from_genaiinvocation(): def test_toolcall_has_attributes_dict(): """ToolInvocation inherits attributes dict from GenAIInvocation""" handler = _make_handler() - tc = handler.start_tool("test") + tc = handler.tool("test") tc.attributes["custom.key"] = "value" assert tc.attributes["custom.key"] == "value" tc.stop() @@ -69,7 +69,7 @@ def test_toolcallrequest_in_message_part_union(): def test_toolcall_operation_name(): """ToolInvocation operation_name is fixed to execute_tool""" handler = _make_handler() - tc = handler.start_tool("my_tool") + tc = handler.tool("my_tool") assert tc._operation_name == "execute_tool" tc.stop() @@ -160,7 +160,7 @@ def get_description(self): sampler_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_tool( + invocation = handler.tool( "get_weather", tool_call_id="call_123", tool_type="function", diff --git a/util/opentelemetry-util-genai/tests/test_utils.py b/util/opentelemetry-util-genai/tests/test_utils.py index 969dc96504..4059bc0cf3 100644 --- a/util/opentelemetry-util-genai/tests/test_utils.py +++ b/util/opentelemetry-util-genai/tests/test_utils.py @@ -320,7 +320,7 @@ def test_llm_manual_start_and_stop_creates_span(self): message = _create_input_message("hi") chat_generation = _create_output_message("ok") - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="manual-model" ) invocation.input_messages = [message] @@ -380,7 +380,7 @@ def get_description(self): handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_inference( + invocation = handler.inference( "test-provider", request_model="sampler-model", server_address="api.example.com", @@ -437,13 +437,13 @@ def get_description(self): handler = TelemetryHandler(tracer_provider=sampler_provider) # This invocation should be dropped - invocation = handler.start_inference( + invocation = handler.inference( "test-provider", request_model="rejected-model" ) invocation.stop() # This invocation should be recorded - invocation = handler.start_inference( + invocation = handler.inference( "test-provider", request_model="accepted-model" ) invocation.stop() @@ -478,7 +478,7 @@ def get_description(self): ) handler = TelemetryHandler(tracer_provider=sampler_provider) - invocation = handler.start_embedding( + invocation = handler.embedding( "test-provider", request_model="embed-model", server_address="embed.example.com", @@ -498,7 +498,7 @@ def get_description(self): assert captured_attributes[server_attributes.SERVER_PORT] == 443 def test_llm_span_finish_reasons_without_output_messages(self): - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="model-without-output" ) invocation.finish_reasons = ["length"] @@ -527,7 +527,7 @@ def test_llm_span_finish_reasons_without_output_messages(self): ) def test_llm_span_finish_reasons_from_invocation(self): - invocation = self.telemetry_handler.start_inference( + invocation = 
self.telemetry_handler.inference( "test-provider", request_model="model-reasons" ) invocation.finish_reasons = ["stop", "length", "stop"] @@ -542,7 +542,7 @@ def test_llm_span_finish_reasons_from_invocation(self): ) def test_llm_span_finish_reasons_from_output_messages(self): - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="model-output-reasons" ) assert invocation.span is not None @@ -561,7 +561,7 @@ def test_llm_span_finish_reasons_from_output_messages(self): ) def test_llm_span_uses_expected_schema_url(self): - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "schema-provider", request_model="schema-model" ) assert invocation.span is not None @@ -584,7 +584,7 @@ def test_llm_span_uses_expected_schema_url(self): emit_event="true", ) def test_llm_log_uses_expected_schema_url(self): - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "schema-provider", request_model="schema-model" ) invocation.output_messages = [_create_output_message()] @@ -639,12 +639,12 @@ def test_parent_child_span_relationship(self): emit_event="", ) def test_embedding_parent_child_span_relationship(self): - parent_invocation = self.telemetry_handler.start_embedding( + parent_invocation = self.telemetry_handler.embedding( "test-provider", request_model="embed-parent-model" ) parent_invocation.input_tokens = 10 assert parent_invocation.span is not None - child_invocation = self.telemetry_handler.start_embedding( + child_invocation = self.telemetry_handler.embedding( "test-provider", request_model="embed-child-model" ) child_invocation.input_tokens = 5 @@ -679,7 +679,7 @@ def test_llm_parent_embedding_child_span_relationship(self): "test-provider", request_model="parent-model" ) as parent_invocation: parent_invocation.input_messages = [message] - child_invocation = self.telemetry_handler.start_embedding( + child_invocation = self.telemetry_handler.embedding( "test-provider", request_model="embed-child-model" ) child_invocation.input_tokens = 3 @@ -790,7 +790,7 @@ class BoomError(RuntimeError): emit_event="", ) def test_embedding_manual_start_and_stop_creates_span(self): - invocation = self.telemetry_handler.start_embedding( + invocation = self.telemetry_handler.embedding( "test-provider", request_model="embed-model", server_address="custom.server.com", @@ -831,7 +831,7 @@ def test_fail_with_exception_sets_error_status_and_type(self): class BoomError(RuntimeError): pass - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="test-model" ) invocation.fail(BoomError("boom")) diff --git a/util/opentelemetry-util-genai/tests/test_utils_events.py b/util/opentelemetry-util-genai/tests/test_utils_events.py index bbdd342a0d..6c5686d08b 100644 --- a/util/opentelemetry-util-genai/tests/test_utils_events.py +++ b/util/opentelemetry-util-genai/tests/test_utils_events.py @@ -61,7 +61,7 @@ def tearDown(self): emit_event="true", ) def test_emits_llm_event(self): - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="event-model" ) invocation.input_messages = [_create_input_message("test query")] @@ -143,7 +143,7 @@ def test_emits_llm_event_and_span(self): chat_generation = _create_output_message("combined response") system_instruction = _create_system_instruction("System prompt here") - 
invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="combined-model" ) invocation.input_messages = [message] @@ -200,7 +200,7 @@ class TestError(RuntimeError): pass message = _create_input_message("error test") - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="error-model" ) invocation.input_messages = [message] @@ -236,7 +236,7 @@ def test_does_not_emit_llm_event_when_emit_event_false(self): message = _create_input_message("emit false test") chat_generation = _create_output_message("emit false response") - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="emit-false-model" ) invocation.input_messages = [message] @@ -254,7 +254,7 @@ def test_does_not_emit_llm_event_when_emit_event_false(self): ) def test_does_not_emit_llm_event_by_default_for_no_content(self): """Test that event is not emitted by default when content_capturing is NO_CONTENT and OTEL_INSTRUMENTATION_GENAI_EMIT_EVENT is not set.""" - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="default-model" ) invocation.input_messages = [_create_input_message("default test")] @@ -274,7 +274,7 @@ def test_does_not_emit_llm_event_by_default_for_no_content(self): ) def test_does_not_emit_llm_event_by_default_for_span_only(self): """Test that event is not emitted by default when content_capturing is SPAN_ONLY and OTEL_INSTRUMENTATION_GENAI_EMIT_EVENT is not set.""" - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="default-model" ) invocation.input_messages = [_create_input_message("default test")] @@ -294,7 +294,7 @@ def test_does_not_emit_llm_event_by_default_for_span_only(self): ) def test_emits_llm_event_by_default_for_event_only(self): """Test that event is emitted by default when content_capturing is EVENT_ONLY and OTEL_INSTRUMENTATION_GENAI_EMIT_EVENT is not set.""" - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="default-model" ) invocation.input_messages = [_create_input_message("default test")] @@ -322,7 +322,7 @@ def test_emits_llm_event_by_default_for_span_and_event(self): chat_generation = _create_output_message("span and event response") system_instruction = _create_system_instruction("System prompt") - invocation = self.telemetry_handler.start_inference( + invocation = self.telemetry_handler.inference( "test-provider", request_model="span-and-event-model" ) invocation.input_messages = [message] diff --git a/util/opentelemetry-util-genai/tests/test_workflow_invocation.py b/util/opentelemetry-util-genai/tests/test_workflow_invocation.py index e6959ed561..b8c86b299d 100644 --- a/util/opentelemetry-util-genai/tests/test_workflow_invocation.py +++ b/util/opentelemetry-util-genai/tests/test_workflow_invocation.py @@ -27,7 +27,7 @@ def setUp(self): self.handler = TelemetryHandler(tracer_provider=tracer_provider) def test_default_values(self): - invocation = self.handler.start_workflow(name=None) + invocation = self.handler.workflow(name=None) invocation.stop() assert invocation.name is None assert invocation._operation_name == "invoke_workflow" @@ -37,7 +37,7 @@ def test_default_values(self): assert not invocation.attributes def 
test_custom_name(self): - invocation = self.handler.start_workflow( + invocation = self.handler.workflow( name="customer_support_pipeline" ) invocation.stop() @@ -45,7 +45,7 @@ def test_custom_name(self): def test_with_input_messages(self): msg = InputMessage(role="user", parts=[Text(content="hello")]) - invocation = self.handler.start_workflow(name="test") + invocation = self.handler.workflow(name="test") invocation.input_messages = [msg] invocation.stop() assert len(invocation.input_messages) == 1 @@ -55,14 +55,14 @@ def test_with_output_messages(self): msg = OutputMessage( role="assistant", parts=[Text(content="hi")], finish_reason="stop" ) - invocation = self.handler.start_workflow(name="test") + invocation = self.handler.workflow(name="test") invocation.output_messages = [msg] invocation.stop() assert len(invocation.output_messages) == 1 assert invocation.output_messages[0].finish_reason == "stop" def test_inherits_genai_invocation(self): - invocation = self.handler.start_workflow(name="test") + invocation = self.handler.workflow(name="test") invocation.attributes["key"] = "value" invocation.stop() spans = self.span_exporter.get_finished_spans() @@ -71,16 +71,16 @@ def test_inherits_genai_invocation(self): def test_default_lists_are_independent(self): """Ensure separate invocations get separate list instances.""" - inv1 = self.handler.start_workflow(name=None) - inv2 = self.handler.start_workflow(name=None) + inv1 = self.handler.workflow(name=None) + inv2 = self.handler.workflow(name=None) inv1.input_messages.append(InputMessage(role="user", parts=[])) assert len(inv2.input_messages) == 0 inv1.stop() inv2.stop() def test_default_attributes_are_independent(self): - inv1 = self.handler.start_workflow(name=None) - inv2 = self.handler.start_workflow(name=None) + inv1 = self.handler.workflow(name=None) + inv2 = self.handler.workflow(name=None) inv1.attributes["foo"] = "bar" assert "foo" not in inv2.attributes inv1.stop() @@ -93,7 +93,7 @@ def test_full_construction(self): parts=[Text(content="answer")], finish_reason="stop", ) - invocation = self.handler.start_workflow(name="my_workflow") + invocation = self.handler.workflow(name="my_workflow") invocation.input_messages = [inp] invocation.output_messages = [out] invocation.stop()
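For reviewers, a minimal usage sketch of the consolidated factory API after this patch (not part of the diff). It assumes opentelemetry-util-genai is installed with a configured tracer provider; the import path, provider/model names, and the get_weather tool are illustrative, and the attribute names follow the handler module docstring and the tests above.

```python
# Sketch of the two lifecycles supported after this change; names below are
# assumptions based on the module layout shown in the diff.
from opentelemetry.util.genai.handler import get_telemetry_handler

handler = get_telemetry_handler()

# Manual lifecycle: inference() constructs and starts the invocation in one
# call; the caller finishes it with stop() or fail().
invocation = handler.inference("my-provider", request_model="my-model")
invocation.temperature = 0.7
try:
    # ... call the model, then set invocation.input_messages / output_messages
    invocation.stop()
except Exception as exc:
    invocation.fail(exc)
    raise

# Context-manager lifecycle: GenAIInvocation.__exit__ calls stop() on success
# or fail() on an exception, so no explicit finish call is needed.
with handler.tool("get_weather", arguments={"city": "Paris"}) as tool_call:
    tool_call.tool_result = {"forecast": "sunny"}  # serialized via gen_ai_json_dumps
```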