diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index 79b9a56cb2..ed3e5bffa6 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -100,6 +100,7 @@ "response.headers.contentType", "response.status", "server.address", + "subcomponent", "zeebe.client.bpmnProcessId", "zeebe.client.messageName", "zeebe.client.correlationKey", diff --git a/newrelic/hooks/adapter_mcp.py b/newrelic/hooks/adapter_mcp.py index bcc8ae0a39..e891df0325 100644 --- a/newrelic/hooks/adapter_mcp.py +++ b/newrelic/hooks/adapter_mcp.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging from newrelic.api.function_trace import FunctionTrace @@ -37,8 +38,10 @@ async def wrap_call_tool(wrapped, instance, args, kwargs): bound_args = bind_args(wrapped, args, kwargs) tool_name = bound_args.get("name") or "tool" function_trace_name = f"{func_name}/{tool_name}" + agentic_subcomponent_data = {"type": "APM-AI_TOOL", "name": tool_name} - with FunctionTrace(name=function_trace_name, group="Llm/tool/MCP", source=wrapped): + with FunctionTrace(name=function_trace_name, group="Llm/tool/MCP", source=wrapped) as ft: + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) return await wrapped(*args, **kwargs) diff --git a/newrelic/hooks/mlmodel_autogen.py b/newrelic/hooks/mlmodel_autogen.py index 87d94a4c44..fbd38adee4 100644 --- a/newrelic/hooks/mlmodel_autogen.py +++ b/newrelic/hooks/mlmodel_autogen.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- +import contextvars +import json import logging import sys import uuid @@ -20,11 +21,13 @@ from newrelic.api.function_trace import FunctionTrace from newrelic.api.time_trace import get_trace_linking_metadata from newrelic.api.transaction import current_transaction +from newrelic.common.llm_utils import AsyncGeneratorProxy from newrelic.common.object_names import callable_name from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.common.package_version_utils import get_package_version from newrelic.common.signature import bind_args from newrelic.core.config import global_settings +from newrelic.core.context import ContextOf # Check for the presence of the autogen-core, autogen-agentchat, or autogen-ext package as they should all have the # same version and one or multiple could be installed @@ -35,6 +38,14 @@ ) +# ContextVar used to propagate trace context to tool functions that may run on thread pool threads. +# This allows nested agents created inside tools to find the parent trace. +_nr_tool_parent_trace = contextvars.ContextVar("_nr_tool_parent_trace", default=None) + +# Flag to indicate we're inside wrap_on_messages, so on_messages_stream can skip +# creating a duplicate agent FT (on_messages internally calls on_messages_stream). +_nr_in_on_messages = contextvars.ContextVar("_nr_in_on_messages", default=False) + RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in Autogen instrumentation: Failed to record LLM events. Please report this issue to New Relic Support.\n%s" @@ -54,7 +65,97 @@ async def wrap_from_server_params(wrapped, instance, args, kwargs): return await wrapped(*args, **kwargs) +async def wrap_on_messages(wrapped, instance, args, kwargs): + """Wrap on_messages (a regular async method) with an agent FunctionTrace. + + on_messages is called by run() and internally iterates on_messages_stream. 
+ Since on_messages is awaited (not an async generator), the FT can stay open + for the full execution, making tool FTs proper children of this agent FT. + """ + transaction = current_transaction() + if not transaction: + # When a tool calls an inner agent on a different thread, NR's thread-local context is lost. + # The ContextVar is propagated by asyncio, so we can recover the parent trace from it. + parent_trace = _nr_tool_parent_trace.get(None) + if parent_trace: + with ContextOf(trace=parent_trace): + return await _on_messages_instrumented(wrapped, instance, args, kwargs) + return await wrapped(*args, **kwargs) + + return await _on_messages_instrumented(wrapped, instance, args, kwargs) + + +async def _on_messages_instrumented(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION) + transaction._add_agent_attribute("llm", True) + + agent_name = getattr(instance, "name", "agent") + agent_id = str(uuid.uuid4()) + func_name = callable_name(wrapped) + function_trace_name = f"{func_name}/{agent_name}" + + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} + + ft = FunctionTrace(name=function_trace_name, group="Llm/agent/Autogen") + ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) + + # Set flag so on_messages_stream (called internally) skips creating a duplicate agent FT. 
+ token = _nr_in_on_messages.set(True) + + try: + return_val = await wrapped(*args, **kwargs) + except Exception: + ft.notice_error(attributes={"agent_id": agent_id}) + ft.__exit__(*sys.exc_info()) + agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) + agent_event_dict.update({"duration": ft.duration * 1000, "error": True}) + transaction.record_custom_event("LlmAgent", agent_event_dict) + raise + finally: + _nr_in_on_messages.reset(token) + + ft.__exit__(None, None, None) + + agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) + agent_event_dict["duration"] = ft.duration * 1000 + transaction.record_custom_event("LlmAgent", agent_event_dict) + + return return_val + + def wrap_on_messages_stream(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + # When a tool calls an inner agent on a different thread, NR's thread-local context is lost. + # The ContextVar is propagated by asyncio, so we can recover the parent trace from it. + parent_trace = _nr_tool_parent_trace.get(None) + if parent_trace: + with ContextOf(trace=parent_trace): + return _on_messages_stream_instrumented(wrapped, instance, args, kwargs) + return wrapped(*args, **kwargs) + + return _on_messages_stream_instrumented(wrapped, instance, args, kwargs) + + +def _on_messages_stream_instrumented(wrapped, instance, args, kwargs): + """Wrap on_messages_stream with an agent FT. + + on_messages_stream returns an AsyncGenerator. When called from on_messages + (the run() path), the agent FT is already created by wrap_on_messages, so + we skip creating a duplicate here. When called directly (the run_stream() + path), the agent FT stays open and is exited when the generator finishes + via AsyncGeneratorProxy callbacks, keeping tools as children of the agent. 
+ """ transaction = current_transaction() if not transaction: return wrapped(*args, **kwargs) @@ -63,35 +164,147 @@ def wrap_on_messages_stream(wrapped, instance, args, kwargs): if not settings.ai_monitoring.enabled: return wrapped(*args, **kwargs) + # If we're already inside wrap_on_messages, skip the agent FT here to avoid + # a duplicate span. The on_messages wrapper owns the agent FT in that case. + if _nr_in_on_messages.get(False): + return wrapped(*args, **kwargs) + # Framework metric also used for entity tagging in the UI transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION) transaction._add_agent_attribute("llm", True) agent_name = getattr(instance, "name", "agent") agent_id = str(uuid.uuid4()) - agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) func_name = callable_name(wrapped) function_trace_name = f"{func_name}/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} + ft = FunctionTrace(name=function_trace_name, group="Llm/agent/Autogen") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = wrapped(*args, **kwargs) except Exception: ft.notice_error(attributes={"agent_id": agent_id}) ft.__exit__(*sys.exc_info()) - # If we hit an exception, append the error attribute and duration from the exited function trace + agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) agent_event_dict.update({"duration": ft.duration * 1000, "error": True}) transaction.record_custom_event("LlmAgent", agent_event_dict) raise - ft.__exit__(None, None, None) - agent_event_dict.update({"duration": ft.duration * 1000}) + # Wrap the async generator with a proxy that keeps the agent FT open during + # iteration. The FT is exited when the generator finishes (StopAsyncIteration) + # or encounters an error. This ensures tool FTs created during iteration are + # children of the agent FT, not siblings. 
+ proxied_return_val = _AutogenAsyncGeneratorProxy(return_val, _record_stream_agent_event, _handle_stream_agent_error) + proxied_return_val._nr_ft = ft + proxied_return_val._nr_agent_name = agent_name + proxied_return_val._nr_agent_id = agent_id + + return proxied_return_val + + +class _AutogenAsyncGeneratorProxy(AsyncGeneratorProxy): + """AsyncGeneratorProxy subclass that exits the agent FT proactively. + + The agent FT stays open during generator iteration so that tool FTs + (which execute in separate asyncio tasks) become children of the + agent FT in the trace tree. + + The FT is exited as soon as all tool children have completed, which + happens after each ``__anext__`` call that returns an item following + tool execution. This ensures the agent span is recorded even if the + consumer breaks out of the outer ``async for`` (e.g. on TaskResult) + before StopAsyncIteration fires. + + Fallbacks: + - StopAsyncIteration callback: exits FT when stream fully exhausts + (handles the no-tool case where child_count stays 0). + - aclose(): exits FT when the generator is explicitly closed. + - __del__(): last-resort safety net. + """ + + async def __anext__(self): + transaction = current_transaction() + if not transaction: + return await self._nr_wrapped_iter.__anext__() + + return_val = None + try: + return_val = await self._nr_wrapped_iter.__anext__() + except StopAsyncIteration: + self._nr_on_stop_iteration(self, transaction) + raise + except Exception: + self._nr_on_error(self, transaction) + raise + + # After each yielded item, check if all async tool children have + # completed. If so, exit the agent FT now. NR's trace system + # supports deferred completion: if the FT exits while children are + # still running, the last child's _complete_trace cascades to + # complete the parent. By exiting here (after tools finish), we + # guarantee the agent span is recorded even if the consumer breaks + # before StopAsyncIteration. 
+ ft = getattr(self, "_nr_ft", None) + if ft and not ft.exited and ft.child_count > 0 and not ft.has_outstanding_children(): + _exit_stream_agent_ft(self) + + return return_val + + async def aclose(self): + try: + return await self.__wrapped__.aclose() + finally: + _exit_stream_agent_ft(self) + + def __del__(self): + _exit_stream_agent_ft(self) + + +def _exit_stream_agent_ft(proxy, error=False): + """Exit the agent FT stored on the proxy and record the LlmAgent event. + + Guards against double-exit: if the FT was already exited (e.g. by + StopAsyncIteration firing before aclose), this is a no-op. + """ + ft = getattr(proxy, "_nr_ft", None) + if not ft or ft.exited: + return + + agent_name = getattr(proxy, "_nr_agent_name", "agent") + agent_id = getattr(proxy, "_nr_agent_id", None) + + if error: + ft.notice_error(attributes={"agent_id": agent_id}) + ft.__exit__(*sys.exc_info()) + else: + ft.__exit__(None, None, None) - transaction.record_custom_event("LlmAgent", agent_event_dict) + transaction = current_transaction() + if not transaction: + return - return return_val + try: + agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) + agent_event_dict["duration"] = ft.duration * 1000 + if error: + agent_event_dict["error"] = True + transaction.record_custom_event("LlmAgent", agent_event_dict) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True) + + +def _record_stream_agent_event(proxy, _transaction): + """Callback for AsyncGeneratorProxy when the stream finishes normally (StopAsyncIteration).""" + _exit_stream_agent_ft(proxy, error=False) + + +def _handle_stream_agent_error(proxy, _transaction): + """Callback for AsyncGeneratorProxy when the stream encounters an error.""" + _exit_stream_agent_ft(proxy, error=True) def _get_llm_metadata(transaction): @@ -180,12 +393,18 @@ async def wrap__execute_tool_call(wrapped, instance, args, kwargs): bound_args = bind_args(wrapped, args, kwargs) tool_call_data = 
bound_args.get("tool_call") tool_event_dict = _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings) tool_name = getattr(tool_call_data, "name", "tool") func_name = callable_name(wrapped) + agentic_subcomponent_data = {"type": "APM-AI_TOOL", "name": tool_name} ft = FunctionTrace(name=f"{func_name}/{tool_name}", group="Llm/tool/Autogen") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) + + # Store the tool's trace in a ContextVar so that nested agents created inside tool functions + # (which may run on thread pool threads) can find the parent trace. + _nr_tool_parent_trace.set(ft) try: return_val = await wrapped(*args, **kwargs) @@ -212,6 +431,8 @@ async def wrap__execute_tool_call(wrapped, instance, args, kwargs): def instrument_autogen_agentchat_agents__assistant_agent(module): if hasattr(module, "AssistantAgent"): + if hasattr(module.AssistantAgent, "on_messages"): + wrap_function_wrapper(module, "AssistantAgent.on_messages", wrap_on_messages) if hasattr(module.AssistantAgent, "on_messages_stream"): wrap_function_wrapper(module, "AssistantAgent.on_messages_stream", wrap_on_messages_stream) if hasattr(module.AssistantAgent, "_execute_tool_call"): diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py index e682f1bff3..a1c22e331f 100644 --- a/newrelic/hooks/mlmodel_langchain.py +++ b/newrelic/hooks/mlmodel_langchain.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License.
+import json import logging import sys import time @@ -161,9 +162,11 @@ def invoke(self, *args, **kwargs): agent_id = str(uuid.uuid4()) agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) function_trace_name = f"invoke/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = FunctionTrace(name=function_trace_name, group="Llm/agent/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = self.__wrapped__.invoke(*args, **kwargs) except Exception: @@ -189,9 +192,11 @@ async def ainvoke(self, *args, **kwargs): agent_id = str(uuid.uuid4()) agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) function_trace_name = f"ainvoke/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = FunctionTrace(name=function_trace_name, group="Llm/agent/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = await self.__wrapped__.ainvoke(*args, **kwargs) except Exception: @@ -217,9 +222,11 @@ def stream(self, *args, **kwargs): agent_id = str(uuid.uuid4()) agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) function_trace_name = f"stream/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = FunctionTrace(name=function_trace_name, group="Llm/agent/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = self.__wrapped__.stream(*args, **kwargs) return_val = GeneratorProxy( @@ -242,9 +249,11 @@ def astream(self, *args, **kwargs): agent_id = str(uuid.uuid4()) agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) function_trace_name = f"astream/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = 
FunctionTrace(name=function_trace_name, group="Llm/agent/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = self.__wrapped__.astream(*args, **kwargs) return_val = AsyncGeneratorProxy( @@ -267,9 +276,11 @@ def transform(self, *args, **kwargs): agent_id = str(uuid.uuid4()) agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) function_trace_name = f"stream/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = FunctionTrace(name=function_trace_name, group="Llm/agent/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = self.__wrapped__.transform(*args, **kwargs) return_val = GeneratorProxy( @@ -292,9 +303,11 @@ def atransform(self, *args, **kwargs): agent_id = str(uuid.uuid4()) agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) function_trace_name = f"astream/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = FunctionTrace(name=function_trace_name, group="Llm/agent/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) try: return_val = self.__wrapped__.atransform(*args, **kwargs) return_val = AsyncGeneratorProxy( @@ -512,8 +525,11 @@ def wrap_tool_sync_run(wrapped, instance, args, kwargs): except Exception: filtered_tool_input = tool_input + agentic_subcomponent_data = {"type": "APM-AI_TOOL", "name": tool_name} + ft = FunctionTrace(name=f"{wrapped.__name__}/{tool_name}", group="Llm/tool/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) linking_metadata = get_trace_linking_metadata() try: return_val = wrapped(**run_args) @@ -573,8 +589,11 @@ async def wrap_tool_async_run(wrapped, instance, args, kwargs): except Exception: filtered_tool_input = tool_input + 
agentic_subcomponent_data = {"type": "APM-AI_TOOL", "name": tool_name} + ft = FunctionTrace(name=f"{wrapped.__name__}/{tool_name}", group="Llm/tool/LangChain") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) linking_metadata = get_trace_linking_metadata() try: return_val = await wrapped(**run_args) diff --git a/newrelic/hooks/mlmodel_strands.py b/newrelic/hooks/mlmodel_strands.py index a4ac6e5d72..9001b95ed2 100644 --- a/newrelic/hooks/mlmodel_strands.py +++ b/newrelic/hooks/mlmodel_strands.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextvars +import json import logging import sys import uuid @@ -31,6 +33,10 @@ _logger = logging.getLogger(__name__) STRANDS_VERSION = get_package_version("strands-agents") +# ContextVar used to propagate trace context to sync @tool functions that run on thread pool threads +# (via asyncio.to_thread). This allows nested agents created inside sync tools to find the parent trace. +_nr_tool_parent_trace = contextvars.ContextVar("_nr_tool_parent_trace", default=None) + RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in Strands instrumentation: Failed to record LLM events. Please report this issue to New Relic Support." TOOL_OUTPUT_FAILURE_LOG_MESSAGE = "Exception occurred in Strands instrumentation: Failed to record output of tool call. Please report this issue to New Relic Support." AGENT_EVENT_FAILURE_LOG_MESSAGE = "Exception occurred in Strands instrumentation: Failed to record agent data. Please report this issue to New Relic Support." @@ -40,6 +46,11 @@ def wrap_agent__call__(wrapped, instance, args, kwargs): trace = current_trace() + if not trace: + # When a sync @tool function creates an inner agent, the tool runs on a thread pool thread + # (via asyncio.to_thread) where NR's thread-local context is lost. 
The ContextVar is propagated + # by asyncio.to_thread, so we can recover the parent trace from it. + trace = _nr_tool_parent_trace.get(None) if not trace: return wrapped(*args, **kwargs) @@ -94,9 +105,12 @@ def wrap_stream_async(wrapped, instance, args, kwargs): func_name = callable_name(wrapped) agent_name = getattr(instance, "name", "agent") function_trace_name = f"{func_name}/{agent_name}" + agentic_subcomponent_data = {"type": "APM-AI_AGENT", "name": agent_name} ft = FunctionTrace(name=function_trace_name, group="Llm/agent/Strands") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) + linking_metadata = get_trace_linking_metadata() agent_id = str(uuid.uuid4()) @@ -105,7 +119,6 @@ def wrap_stream_async(wrapped, instance, args, kwargs): except Exception: raise - # For streaming responses, wrap with proxy and attach metadata try: # For streaming responses, wrap with proxy and attach metadata proxied_return_val = AsyncGeneratorProxy( @@ -126,7 +139,6 @@ def _record_agent_event_on_stop_iteration(self, transaction): # Use saved linking metadata to maintain correct span association linking_metadata = self._nr_metadata or get_trace_linking_metadata() self._nr_ft.__exit__(None, None, None) - try: strands_attrs = getattr(self, "_nr_strands_attrs", {}) @@ -352,9 +364,16 @@ def wrap_tool_executor__stream(wrapped, instance, args, kwargs): func_name = callable_name(wrapped) function_trace_name = f"{func_name}/{tool_name}" + agentic_subcomponent_data = {"type": "APM-AI_TOOL", "name": tool_name} ft = FunctionTrace(name=function_trace_name, group="Llm/tool/Strands") ft.__enter__() + ft._add_agent_attribute("subcomponent", json.dumps(agentic_subcomponent_data)) + + # Store the tool's trace in a ContextVar so that nested agents created inside sync @tool functions + # (which run on thread pool threads via asyncio.to_thread) can find the parent trace. 
+ _nr_tool_parent_trace.set(ft) + linking_metadata = get_trace_linking_metadata() tool_id = str(uuid.uuid4()) diff --git a/tests/adapter_mcp/test_mcp.py b/tests/adapter_mcp/test_mcp.py index 5ba6a81074..5424b57ca7 100644 --- a/tests/adapter_mcp/test_mcp.py +++ b/tests/adapter_mcp/test_mcp.py @@ -19,6 +19,7 @@ from mcp.server.fastmcp.tools import ToolManager from testing_support.ml_testing_utils import disabled_ai_monitoring_settings from testing_support.validators.validate_function_not_called import validate_function_not_called +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task @@ -57,6 +58,7 @@ def echo_prompt(message: str): rollup_metrics=[("Llm/tool/MCP/mcp.client.session:ClientSession.call_tool/add_exclamation", 1)], background_task=True, ) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_tool_tracing_via_client_session(loop, fastmcp_server): async def _test(): @@ -75,6 +77,7 @@ async def _test(): rollup_metrics=[("Llm/tool/MCP/mcp.server.fastmcp.tools.tool_manager:ToolManager.call_tool/add_exclamation", 1)], background_task=True, ) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_tool_tracing_via_tool_manager(loop): async def _test(): diff --git a/tests/mlmodel_autogen/test_assistant_agent.py b/tests/mlmodel_autogen/test_assistant_agent.py index 866b3b39df..3b2ea8c4ed 100644 --- a/tests/mlmodel_autogen/test_assistant_agent.py +++ b/tests/mlmodel_autogen/test_assistant_agent.py @@ -16,7 +16,7 @@ import pytest from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.base import TaskResult -from testing_support.fixtures import reset_core_stats_engine, validate_attributes 
+from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, @@ -27,6 +27,7 @@ from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -97,6 +98,7 @@ def add_exclamation(message: str) -> str: return f"{message}!" +@dt_enabled @reset_core_stats_engine() @validate_custom_events( events_with_context_attrs(tool_recorded_event) + events_with_context_attrs(agent_recorded_event) @@ -127,6 +129,8 @@ def add_exclamation(message: str) -> str: background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_run_assistant_agent(loop, set_trace_info, single_tool_model_client): set_trace_info() @@ -142,6 +146,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @validate_custom_events(tool_recorded_event + agent_recorded_event) @validate_custom_event_count(count=2) @@ -170,6 +175,8 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=1, 
exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_run_stream_assistant_agent(loop, set_trace_info, single_tool_model_client): set_trace_info() @@ -192,6 +199,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(tool_events_sans_content(tool_recorded_event) + agent_recorded_event) @@ -221,6 +229,8 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_run_assistant_agent_no_content(loop, set_trace_info, single_tool_model_client): set_trace_info() @@ -235,6 +245,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @disabled_ai_monitoring_settings @reset_core_stats_engine() @validate_custom_event_count(count=0) @@ -259,6 +270,7 @@ async def _test(): @SKIP_IF_AUTOGEN_062 +@dt_enabled @reset_core_stats_engine() @validate_transaction_error_event_count(1) @validate_error_trace_attributes(callable_name(TypeError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}}) @@ -289,6 +301,8 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_run_assistant_agent_error(loop, set_trace_info, single_tool_model_client_error): set_trace_info() @@ -306,6 +320,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=0) def 
test_run_assistant_agent_outside_txn(loop, single_tool_model_client): diff --git a/tests/mlmodel_autogen/test_teams.py b/tests/mlmodel_autogen/test_teams.py index fa8b8ca3f6..f0d6123223 100644 --- a/tests/mlmodel_autogen/test_teams.py +++ b/tests/mlmodel_autogen/test_teams.py @@ -17,7 +17,7 @@ from autogen_agentchat.base import TaskResult from autogen_agentchat.teams import RoundRobinGroupChat from test_assistant_agent import SKIP_IF_AUTOGEN_062 -from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, @@ -28,6 +28,7 @@ from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -175,6 +176,7 @@ def compute_sum(a: int, b: int) -> int: return a + b +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=8) @validate_transaction_metrics( @@ -213,6 +215,10 @@ def compute_sum(a: int, b: int) -> int: background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "robot_agent"}'}) 
+@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "compute_sum"}'}) @background_task() def test_run_stream_round_robin_group(loop, set_trace_info, multi_tool_model_client): set_trace_info() @@ -250,6 +256,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=8) @validate_transaction_metrics( @@ -287,6 +294,10 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "robot_agent"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "compute_sum"}'}) @background_task() def test_run_round_robin_group(loop, set_trace_info, multi_tool_model_client): set_trace_info() @@ -319,6 +330,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(tool_events_sans_content(team_tools_recorded_events) + team_agent_recorded_events) @@ -358,6 +370,10 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "robot_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "compute_sum"}'}) @background_task() def test_run_round_robin_group_no_content(loop, 
set_trace_info, multi_tool_model_client): set_trace_info() @@ -385,6 +401,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @disabled_ai_monitoring_settings @reset_core_stats_engine() @validate_custom_event_count(count=0) @@ -416,6 +433,7 @@ async def _test(): @SKIP_IF_AUTOGEN_062 +@dt_enabled @reset_core_stats_engine() @validate_transaction_error_event_count(1) @validate_error_trace_attributes(callable_name(TypeError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}}) @@ -456,6 +474,10 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "pirate_agent"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "robot_agent"}'}) +@validate_span_events(count=2, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "compute_sum"}'}) @background_task() def test_run_round_robin_group_error(loop, set_trace_info, multi_tool_model_client_error): set_trace_info() @@ -485,6 +507,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=0) def test_run_round_robin_group_outside_txn(loop, multi_tool_model_client): diff --git a/tests/mlmodel_langchain/test_agents.py b/tests/mlmodel_langchain/test_agents.py index 9ec7b20dff..6a1c471ecd 100644 --- a/tests/mlmodel_langchain/test_agents.py +++ b/tests/mlmodel_langchain/test_agents.py @@ -15,7 +15,7 @@ import pytest from langchain.messages import HumanMessage from langchain.tools import tool -from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( 
disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, @@ -24,6 +24,7 @@ from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -76,6 +77,7 @@ def add_exclamation(message: str) -> str: return f"{message}!" +@dt_enabled @reset_core_stats_engine() def test_agent(exercise_agent, create_agent_runnable, set_trace_info, method_name): @validate_custom_events(events_with_context_attrs(agent_recorded_event)) @@ -87,6 +89,8 @@ def test_agent(exercise_agent, create_agent_runnable, set_trace_info, method_nam background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task(name="test_agent") def _test(): set_trace_info() @@ -100,6 +104,7 @@ def _test(): _test() +@dt_enabled @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings def test_agent_no_content(exercise_agent, create_agent_runnable, set_trace_info, method_name): @@ -112,6 +117,8 @@ def test_agent_no_content(exercise_agent, create_agent_runnable, set_trace_info, background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", 
"name": "add_exclamation"}'}) @background_task(name="test_agent_no_content") def _test(): set_trace_info() @@ -123,6 +130,7 @@ def _test(): _test() +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=0) def test_agent_outside_txn(exercise_agent, create_agent_runnable): @@ -130,6 +138,7 @@ def test_agent_outside_txn(exercise_agent, create_agent_runnable): exercise_agent(my_agent, PROMPT) +@dt_enabled @disabled_ai_monitoring_settings @reset_core_stats_engine() @validate_custom_event_count(count=0) @@ -140,6 +149,7 @@ def test_agent_disabled_ai_monitoring_events(exercise_agent, create_agent_runnab exercise_agent(my_agent, PROMPT) +@dt_enabled @reset_core_stats_engine() def test_agent_execution_error(exercise_agent, create_agent_runnable, set_trace_info, method_name, agent_runnable_type): # Add a wrapper to intentionally force an error in the Agent code @@ -159,6 +169,8 @@ def inject_exception(wrapped, instance, args, kwargs): background_task=True, ) @validate_attributes("agent", ["llm"]) + # Only an agent span is expected here and not a tool because the error is injected before the tool is called + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) @background_task(name="test_agent_execution_error") def _test(): set_trace_info() diff --git a/tests/mlmodel_langchain/test_tools.py b/tests/mlmodel_langchain/test_tools.py index 19778997db..3ad250fb45 100644 --- a/tests/mlmodel_langchain/test_tools.py +++ b/tests/mlmodel_langchain/test_tools.py @@ -14,7 +14,7 @@ import pytest from langchain.messages import HumanMessage -from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( disabled_ai_monitoring_record_content_settings, events_with_context_attrs, @@ -23,6 +23,7 @@ from testing_support.validators.validate_custom_event import 
validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -95,6 +96,7 @@ ] +@dt_enabled @reset_core_stats_engine() def test_tool(exercise_agent, set_trace_info, create_agent_runnable, add_exclamation, tool_method_name): @validate_custom_events(events_with_context_attrs(tool_recorded_event)) @@ -106,6 +108,8 @@ def test_tool(exercise_agent, set_trace_info, create_agent_runnable, add_exclama background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task(name="test_tool") def _test(): set_trace_info() @@ -119,6 +123,7 @@ def _test(): _test() +@dt_enabled @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings def test_tool_no_content(exercise_agent, set_trace_info, create_agent_runnable, add_exclamation, tool_method_name): @@ -131,6 +136,8 @@ def test_tool_no_content(exercise_agent, set_trace_info, create_agent_runnable, background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task(name="test_tool_no_content") def _test(): set_trace_info() @@ -142,6 +149,7 @@ def _test(): _test() +@dt_enabled 
@reset_core_stats_engine() def test_tool_execution_error(exercise_agent, set_trace_info, create_agent_runnable, add_exclamation, tool_method_name): @validate_transaction_error_event_count(1) @@ -157,6 +165,8 @@ def test_tool_execution_error(exercise_agent, set_trace_info, create_agent_runna background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task(name="test_tool_execution_error") def _test(): set_trace_info() @@ -169,6 +179,7 @@ def _test(): _test() +@dt_enabled @reset_core_stats_engine() def test_tool_pre_execution_exception( exercise_agent, set_trace_info, create_agent_runnable, add_exclamation, tool_method_name @@ -190,6 +201,8 @@ def inject_exception(wrapped, instance, args, kwargs): background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task(name="test_tool_pre_execution_exception") def _test(): set_trace_info() diff --git a/tests/mlmodel_strands/test_agents.py b/tests/mlmodel_strands/test_agents.py index b0a1965eea..93d635a716 100644 --- a/tests/mlmodel_strands/test_agents.py +++ b/tests/mlmodel_strands/test_agents.py @@ -14,7 +14,7 @@ import pytest from strands import Agent -from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, @@ -23,6 +23,7 @@ from testing_support.validators.validate_custom_event import 
validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -65,6 +66,7 @@ ] +@dt_enabled @reset_core_stats_engine() @validate_custom_events(events_with_context_attrs(agent_recorded_event)) @validate_custom_event_count(count=2) @@ -75,6 +77,8 @@ background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_agent(exercise_agent, set_trace_info, single_tool_model): set_trace_info() @@ -97,6 +101,7 @@ def test_agent(exercise_agent, set_trace_info, single_tool_model): assert response.metrics.tool_metrics["add_exclamation"].success_count == 1 +@dt_enabled @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(agent_recorded_event) @@ -108,6 +113,8 @@ def test_agent(exercise_agent, set_trace_info, single_tool_model): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_agent_no_content(exercise_agent, set_trace_info, single_tool_model): set_trace_info() @@ -129,6 +136,7 @@ def test_agent_no_content(exercise_agent, set_trace_info, single_tool_model): assert 
response.metrics.tool_metrics["add_exclamation"].success_count == 1 +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=0) def test_agent_outside_txn(exercise_agent, single_tool_model): @@ -150,6 +158,7 @@ def test_agent_outside_txn(exercise_agent, single_tool_model): assert response.metrics.tool_metrics["add_exclamation"].success_count == 1 +@dt_enabled @disabled_ai_monitoring_settings @reset_core_stats_engine() @validate_custom_event_count(count=0) @@ -174,6 +183,7 @@ def test_agent_disabled_ai_monitoring_events(exercise_agent, set_trace_info, sin assert response.metrics.tool_metrics["add_exclamation"].success_count == 1 +@dt_enabled @reset_core_stats_engine() @validate_transaction_error_event_count(1) @validate_error_trace_attributes(callable_name(ValueError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}}) @@ -186,6 +196,8 @@ def test_agent_disabled_ai_monitoring_events(exercise_agent, set_trace_info, sin background_task=True, ) @validate_attributes("agent", ["llm"]) +# Only an agent span is expected here and not a tool because the error is injected before the tool is called +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) @background_task() def test_agent_execution_error(exercise_agent, set_trace_info, single_tool_model): # Add a wrapper to intentionally force an error in the Agent code diff --git a/tests/mlmodel_strands/test_multiagent_graph.py b/tests/mlmodel_strands/test_multiagent_graph.py index 7bd84fc901..216a6bd3a5 100644 --- a/tests/mlmodel_strands/test_multiagent_graph.py +++ b/tests/mlmodel_strands/test_multiagent_graph.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import disabled_ai_monitoring_settings, events_with_context_attrs from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task @@ -86,6 +87,7 @@ ] +@dt_enabled @reset_core_stats_engine() @validate_custom_events(events_with_context_attrs(tool_recorded_events)) @validate_custom_events(events_with_context_attrs(agent_recorded_events)) @@ -107,6 +109,10 @@ background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "math_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "calculate_sum"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "analysis_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "analyze_result"}'}) @background_task() def test_multiagent_graph_invoke(set_trace_info, agent_graph): set_trace_info() @@ -123,6 +129,7 @@ def test_multiagent_graph_invoke(set_trace_info, agent_graph): ) +@dt_enabled @reset_core_stats_engine() @validate_custom_events(tool_recorded_events) @validate_custom_events(agent_recorded_events) @@ -144,6 +151,10 @@ def test_multiagent_graph_invoke(set_trace_info, agent_graph): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", 
"name": "math_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "calculate_sum"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "analysis_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "analyze_result"}'}) @background_task() def test_multiagent_graph_invoke_async(loop, set_trace_info, agent_graph): set_trace_info() @@ -162,6 +173,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @validate_custom_events(tool_recorded_events) @validate_custom_events(agent_recorded_events) @@ -183,6 +195,10 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "math_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "calculate_sum"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "analysis_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "analyze_result"}'}) @background_task() def test_multiagent_graph_stream_async(loop, set_trace_info, agent_graph): set_trace_info() @@ -201,6 +217,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @disabled_ai_monitoring_settings @reset_core_stats_engine() @validate_custom_event_count(count=0) @@ -219,6 +236,7 @@ def test_multiagent_graph_invoke_disabled_ai_monitoring_events(set_trace_info, a ) +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=0) def test_multiagent_graph_invoke_outside_txn(agent_graph): diff --git a/tests/mlmodel_strands/test_multiagent_swarm.py b/tests/mlmodel_strands/test_multiagent_swarm.py index bbcbb3e27c..dc4ee95628 100644 --- a/tests/mlmodel_strands/test_multiagent_swarm.py +++ 
b/tests/mlmodel_strands/test_multiagent_swarm.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import disabled_ai_monitoring_settings, events_with_context_attrs from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task @@ -106,6 +107,7 @@ ] +@dt_enabled @reset_core_stats_engine() @validate_custom_events(events_with_context_attrs(tool_recorded_events)) @validate_custom_events(events_with_context_attrs(agent_recorded_events)) @@ -128,6 +130,10 @@ background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "math_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "calculate_sum"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "analysis_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "analyze_result"}'}) @background_task() def test_multiagent_swarm_invoke(set_trace_info, agent_swarm): set_trace_info() @@ -145,6 +151,7 @@ def test_multiagent_swarm_invoke(set_trace_info, agent_swarm): ) +@dt_enabled @reset_core_stats_engine() @validate_custom_events(tool_recorded_events) @validate_custom_events(agent_recorded_events) @@ -167,6 +174,10 @@ def 
test_multiagent_swarm_invoke(set_trace_info, agent_swarm): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "math_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "calculate_sum"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "analysis_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "analyze_result"}'}) @background_task() def test_multiagent_swarm_invoke_async(loop, set_trace_info, agent_swarm): set_trace_info() @@ -186,6 +197,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @reset_core_stats_engine() @validate_custom_events(tool_recorded_events) @validate_custom_events(agent_recorded_events) @@ -208,6 +220,10 @@ async def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "math_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "calculate_sum"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "analysis_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "analyze_result"}'}) @background_task() def test_multiagent_swarm_stream_async(loop, set_trace_info, agent_swarm): set_trace_info() @@ -226,6 +242,7 @@ async def _test(): loop.run_until_complete(_test()) +@dt_enabled @disabled_ai_monitoring_settings @reset_core_stats_engine() @validate_custom_event_count(count=0) @@ -245,6 +262,7 @@ def test_multiagent_swarm_invoke_disabled_ai_monitoring_events(set_trace_info, a ) +@dt_enabled @reset_core_stats_engine() @validate_custom_event_count(count=0) def test_multiagent_swarm_invoke_outside_txn(agent_swarm): 
diff --git a/tests/mlmodel_strands/test_tools.py b/tests/mlmodel_strands/test_tools.py index a5e62ff3a3..bc9eb233c7 100644 --- a/tests/mlmodel_strands/test_tools.py +++ b/tests/mlmodel_strands/test_tools.py @@ -14,7 +14,7 @@ import pytest from strands import Agent -from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.fixtures import dt_enabled, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( disabled_ai_monitoring_record_content_settings, events_with_context_attrs, @@ -23,6 +23,7 @@ from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -94,6 +95,7 @@ EXPECTED_ERROR_MESSAGES = ["Error: RuntimeError - Oops", "Error: Oops"] +@dt_enabled @reset_core_stats_engine() @validate_custom_events(events_with_context_attrs(tool_recorded_event)) @validate_custom_event_count(count=2) @@ -104,6 +106,8 @@ background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_tool(exercise_agent, set_trace_info, single_tool_model, add_exclamation): set_trace_info() @@ -126,6 +130,7 @@ def test_tool(exercise_agent, set_trace_info, single_tool_model, add_exclamation assert response.metrics.tool_metrics["add_exclamation"].success_count == 
1 +@dt_enabled @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(tool_events_sans_content(tool_recorded_event)) @@ -137,6 +142,8 @@ def test_tool(exercise_agent, set_trace_info, single_tool_model, add_exclamation background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_tool_no_content(exercise_agent, set_trace_info, single_tool_model, add_exclamation): set_trace_info() @@ -158,6 +165,7 @@ def test_tool_no_content(exercise_agent, set_trace_info, single_tool_model, add_ assert response.metrics.tool_metrics["add_exclamation"].success_count == 1 +@dt_enabled @reset_core_stats_engine() def test_tool_execution_error(exercise_agent, set_trace_info, single_tool_model_error, add_exclamation): from strands.tools import PythonAgentTool @@ -178,6 +186,8 @@ def test_tool_execution_error(exercise_agent, set_trace_info, single_tool_model_ background_task=True, ) @validate_attributes("agent", ["llm"]) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) + @validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task(name="test_tool_execution_error") def _test(): set_trace_info() @@ -201,6 +211,7 @@ def _test(): _test() +@dt_enabled @reset_core_stats_engine() @validate_transaction_error_event_count(1) @validate_error_trace_attributes(callable_name(ValueError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}}) @@ -213,6 +224,8 @@ def _test(): background_task=True, ) @validate_attributes("agent", ["llm"]) +@validate_span_events(count=1, exact_agents={"subcomponent": '{"type": "APM-AI_AGENT", "name": "my_agent"}'}) +@validate_span_events(count=1, 
exact_agents={"subcomponent": '{"type": "APM-AI_TOOL", "name": "add_exclamation"}'}) @background_task() def test_tool_pre_execution_exception(exercise_agent, set_trace_info, single_tool_model, add_exclamation): # Add a wrapper to intentionally force an error in the ToolExecutor._stream code to hit the exception path in