From d4c61a20378807d367c68e9ea6e8db3f176f19d5 Mon Sep 17 00:00:00 2001 From: Andrei Borza Date: Thu, 16 Apr 2026 15:22:00 +0900 Subject: [PATCH] feat(core): Instrument langgraph createReactAgent Add instrumentation for LangGraph's `createReactAgent` API with full span hierarchy: invoke_agent, gen_ai.chat, and execute_tool. createReactAgent wrapping: - Extract agent name, LLM model, and tools from params - Wrap compiled graph's invoke() with invoke_agent span - Wrap tool invoke() with execute_tool spans (name, type, description, arguments, result) - Inject LangChain callback handler + lc_agent_name metadata at invoke level for chat span creation and agent name propagation to all child spans - Suppress StateGraph.compile instrumentation inside createReactAgent to avoid duplicate spans LangChain callback handler improvements: - Read gen_ai.agent.name from metadata.lc_agent_name - Suppress chain and tool callback spans inside agent context to avoid duplicates with our direct instrumentation - Extract tool definitions from extraParams in handleChatModelStart - Use runName for tool name (set by LangChain's StructuredTool) - Add gen_ai.operation.name to tool spans - Extract ToolMessage .content in handleToolEnd BREAKING: addToolCallsAttributes now reads from message.tool_calls (LangChain's normalized format) instead of scanning message.content for Anthropic-style tool_use items. This fixes duplicate tool calls on Anthropic chat spans but changes the tool call format in gen_ai.response.tool_calls from Anthropic-native to LangChain- normalized (args instead of input, type: tool_call instead of tool_use). 
OTel module patching: - Patch @langchain/langgraph/prebuilt for createReactAgent (ESM + CJS file patches for dist/prebuilt/index.cjs) Exports: - instrumentCreateReactAgent from core, browser, cloudflare Co-Authored-By: Claude Opus 4.6 --- .../tracing/langgraph/agent-scenario.mjs | 65 ++++++++ .../langgraph/agent-tools-scenario.mjs | 122 ++++++++++++++ .../tracing/langgraph/instrument-agent.mjs | 17 ++ .../suites/tracing/langgraph/test.ts | 84 ++++++++++ packages/browser/src/index.ts | 1 + packages/cloudflare/src/index.ts | 1 + packages/core/src/index.ts | 2 +- packages/core/src/tracing/langchain/index.ts | 53 +++++- packages/core/src/tracing/langchain/types.ts | 8 + packages/core/src/tracing/langchain/utils.ts | 56 +++++-- packages/core/src/tracing/langgraph/index.ts | 100 ++++++++++- packages/core/src/tracing/langgraph/utils.ts | 157 +++++++++++++++++- .../test/lib/utils/langgraph-utils.test.ts | 33 ++++ .../tracing/langgraph/instrumentation.ts | 102 ++++++++---- 14 files changed, 748 insertions(+), 53 deletions(-) create mode 100644 dev-packages/node-integration-tests/suites/tracing/langgraph/agent-scenario.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/langgraph/agent-tools-scenario.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-agent.mjs create mode 100644 packages/core/test/lib/utils/langgraph-utils.test.ts diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/agent-scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/agent-scenario.mjs new file mode 100644 index 000000000000..78b0a0cba38c --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/agent-scenario.mjs @@ -0,0 +1,65 @@ +import { ChatAnthropic } from '@langchain/anthropic'; +import { HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { createReactAgent } from '@langchain/langgraph/prebuilt'; +import * as Sentry from '@sentry/node'; +import 
express from 'express'; + +function startMockAnthropicServer() { + const app = express(); + app.use(express.json()); + + app.post('/v1/messages', (req, res) => { + const model = req.body.model; + + res.json({ + id: 'msg_react_agent_123', + type: 'message', + role: 'assistant', + content: [ + { + type: 'text', + text: 'Paris is the capital of France.', + }, + ], + model: model, + stop_reason: 'end_turn', + stop_sequence: null, + usage: { + input_tokens: 20, + output_tokens: 10, + }, + }); + }); + + return new Promise(resolve => { + const server = app.listen(0, () => { + resolve(server); + }); + }); +} + +async function run() { + const server = await startMockAnthropicServer(); + const baseUrl = `http://localhost:${server.address().port}`; + + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + const llm = new ChatAnthropic({ + model: 'claude-3-5-sonnet-20241022', + apiKey: 'mock-api-key', + clientOptions: { + baseURL: baseUrl, + }, + }); + + const agent = createReactAgent({ llm, tools: [], name: 'helpful_assistant' }); + + await agent.invoke({ + messages: [new SystemMessage('You are a helpful assistant.'), new HumanMessage('What is the capital of France?')], + }); + }); + + await Sentry.flush(2000); + server.close(); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/agent-tools-scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/agent-tools-scenario.mjs new file mode 100644 index 000000000000..f499d9eff5f5 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/agent-tools-scenario.mjs @@ -0,0 +1,122 @@ +import { tool } from '@langchain/core/tools'; +import { ChatAnthropic } from '@langchain/anthropic'; +import { createReactAgent } from '@langchain/langgraph/prebuilt'; +import { HumanMessage } from '@langchain/core/messages'; +import * as Sentry from '@sentry/node'; +import express from 'express'; +import { z } from 'zod'; + +let callCount = 0; + +function 
startMockAnthropicServer() { + const app = express(); + app.use(express.json()); + + app.post('/v1/messages', (req, res) => { + callCount++; + const model = req.body.model; + + if (callCount === 1) { + // First call: model decides to call the "add" tool + res.json({ + id: 'msg_1', + type: 'message', + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'toolu_add_1', + name: 'add', + input: { a: 3, b: 5 }, + }, + ], + model: model, + stop_reason: 'tool_use', + usage: { input_tokens: 20, output_tokens: 10 }, + }); + } else if (callCount === 2) { + // Second call: model sees add result=8, calls "multiply" + res.json({ + id: 'msg_2', + type: 'message', + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'toolu_mul_1', + name: 'multiply', + input: { a: 8, b: 4 }, + }, + ], + model: model, + stop_reason: 'tool_use', + usage: { input_tokens: 30, output_tokens: 10 }, + }); + } else { + // Third call: model returns final answer + res.json({ + id: 'msg_3', + type: 'message', + role: 'assistant', + content: [{ type: 'text', text: 'The result is 32.' 
}], + model: model, + stop_reason: 'end_turn', + usage: { input_tokens: 40, output_tokens: 10 }, + }); + } + }); + + return new Promise(resolve => { + const server = app.listen(0, () => resolve(server)); + }); +} + +async function run() { + const server = await startMockAnthropicServer(); + const baseUrl = `http://localhost:${server.address().port}`; + + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + const llm = new ChatAnthropic({ + model: 'claude-3-5-sonnet-20241022', + apiKey: 'mock-api-key', + clientOptions: { baseURL: baseUrl }, + }); + + const addTool = tool( + async ({ a, b }) => { + return String(a + b); + }, + { + name: 'add', + description: 'Add two numbers', + schema: z.object({ a: z.number(), b: z.number() }), + }, + ); + + const multiplyTool = tool( + async ({ a, b }) => { + return String(a * b); + }, + { + name: 'multiply', + description: 'Multiply two numbers', + schema: z.object({ a: z.number(), b: z.number() }), + }, + ); + + const agent = createReactAgent({ + llm, + tools: [addTool, multiplyTool], + name: 'math_assistant', + }); + + await agent.invoke({ + messages: [new HumanMessage('Calculate (3 + 5) * 4')], + }); + }); + + await Sentry.flush(2000); + server.close(); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-agent.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-agent.mjs new file mode 100644 index 000000000000..dbd4e959020a --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-agent.mjs @@ -0,0 +1,17 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + sendDefaultPii: true, + transport: loggingTransport, + beforeSendTransaction: event => { + // Filter out mock express server transactions + if (event.transaction && 
event.transaction.includes('/v1/messages')) { + return null; + } + return event; + }, +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts index 0837efb63c2f..3309829f8f9b 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts @@ -445,4 +445,88 @@ describe('LangGraph integration', () => { }); }, ); + + // createReactAgent tests + const EXPECTED_TRANSACTION_REACT_AGENT = { + transaction: 'main', + spans: expect.arrayContaining([ + // invoke_agent span (no create_agent span expected) + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'helpful_assistant', + }), + description: 'invoke_agent helpful_assistant', + op: 'gen_ai.invoke_agent', + origin: 'auto.ai.langgraph', + status: 'ok', + }), + // chat span (from Anthropic integration) should be a child with inherited agent name + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant', + }), + op: 'gen_ai.chat', + }), + ]), + }; + + createEsmAndCjsTests(__dirname, 'agent-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => { + test('should instrument createReactAgent with agent and chat spans', { timeout: 30000 }, async () => { + await createRunner() + .ignore('event') + .expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT }) + .start() + .completed(); + }); + }); + + // createReactAgent with tools - verifies tool execution spans + const EXPECTED_TRANSACTION_REACT_AGENT_TOOLS = { + transaction: 'main', + spans: expect.arrayContaining([ + // invoke_agent span + 
expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'math_assistant', + }), + op: 'gen_ai.invoke_agent', + status: 'ok', + }), + // execute_tool span for "add" + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + 'gen_ai.tool.name': 'add', + }), + description: 'execute_tool add', + op: 'gen_ai.execute_tool', + status: 'ok', + }), + // execute_tool span for "multiply" + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + 'gen_ai.tool.name': 'multiply', + }), + description: 'execute_tool multiply', + op: 'gen_ai.execute_tool', + status: 'ok', + }), + ]), + }; + + createEsmAndCjsTests(__dirname, 'agent-tools-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => { + test('should create tool execution spans for createReactAgent with tools', { timeout: 30000 }, async () => { + await createRunner() + .ignore('event') + .expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT_TOOLS }) + .start() + .completed(); + }); + }); }); diff --git a/packages/browser/src/index.ts b/packages/browser/src/index.ts index 844f6a170090..4709e6167b3c 100644 --- a/packages/browser/src/index.ts +++ b/packages/browser/src/index.ts @@ -72,6 +72,7 @@ export { instrumentOpenAiClient, instrumentGoogleGenAIClient, instrumentLangGraph, + instrumentCreateReactAgent, createLangChainCallbackHandler, instrumentLangChainEmbeddings, logger, diff --git a/packages/cloudflare/src/index.ts b/packages/cloudflare/src/index.ts index 961542e01446..eaa9b3ddb032 100644 --- a/packages/cloudflare/src/index.ts +++ b/packages/cloudflare/src/index.ts @@ -110,6 +110,7 @@ export { withStreamedSpan, spanStreamingIntegration, instrumentLangGraph, + instrumentCreateReactAgent, } from '@sentry/core'; export { withSentry } from './withSentry'; diff --git a/packages/core/src/index.ts 
b/packages/core/src/index.ts index f4039244e550..96b663d83fa8 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -178,7 +178,7 @@ export type { GoogleGenAIResponse } from './tracing/google-genai/types'; export { createLangChainCallbackHandler, instrumentLangChainEmbeddings } from './tracing/langchain'; export { LANGCHAIN_INTEGRATION_NAME } from './tracing/langchain/constants'; export type { LangChainOptions, LangChainIntegration } from './tracing/langchain/types'; -export { instrumentStateGraphCompile, instrumentLangGraph } from './tracing/langgraph'; +export { instrumentStateGraphCompile, instrumentCreateReactAgent, instrumentLangGraph } from './tracing/langgraph'; export { LANGGRAPH_INTEGRATION_NAME } from './tracing/langgraph/constants'; export type { LangGraphOptions, LangGraphIntegration, CompiledGraph } from './tracing/langgraph/types'; export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './tracing/openai/types'; diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index 64e9058d8ce2..627a3dd240c6 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -1,3 +1,4 @@ +/* eslint-disable max-lines */ import { captureException } from '../../exports'; import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import { SPAN_STATUS_ERROR } from '../../tracing'; @@ -5,6 +6,7 @@ import { startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; import { GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, @@ -23,6 +25,8 @@ import { extractChatModelRequestAttributes, extractLLMRequestAttributes, extractLlmResponseAttributes, + extractToolDefinitions, + getAgentAttributesFromMetadata, getInvocationParams, } 
from './utils'; @@ -102,6 +106,7 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): name: `${operationName} ${modelName}`, op: 'gen_ai.chat', attributes: { + ...getAgentAttributesFromMetadata(metadata), ...attributes, [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', }, @@ -119,7 +124,7 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): messages: unknown, runId: string, _parentRunId?: string, - _extraParams?: Record, + extraParams?: Record, tags?: string[], metadata?: Record, _runName?: string, @@ -133,6 +138,12 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): invocationParams, metadata, ); + + const toolDefsJson = extractToolDefinitions(extraParams); + if (toolDefsJson) { + attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = toolDefsJson; + } + const modelName = attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]; const operationName = attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]; @@ -141,6 +152,7 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): name: `${operationName} ${modelName}`, op: 'gen_ai.chat', attributes: { + ...getAgentAttributesFromMetadata(metadata), ...attributes, [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', }, @@ -193,17 +205,23 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): runId: string, _parentRunId?: string, _tags?: string[], - _metadata?: Record, + metadata?: Record, _runType?: string, runName?: string, ) { + // Skip chain spans when inside an agent context (createReactAgent). + // The agent already creates an invoke_agent span; internal chain steps + // (ChannelWrite, Branch, prompt, etc.) are noise. 
+ if (metadata?.__sentry_langgraph__) { + return; + } + const chainName = runName || chain.name || 'unknown_chain'; const attributes: Record = { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', 'langchain.chain.name': chainName, }; - // Add inputs if recordInputs is enabled if (recordInputs) { attributes['langchain.chain.inputs'] = JSON.stringify(inputs); } @@ -255,14 +273,30 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): }, // Tool Start Handler - handleToolStart(tool: { name?: string }, input: string, runId: string, _parentRunId?: string) { - const toolName = tool.name || 'unknown_tool'; + handleToolStart( + tool: { name?: string }, + input: string, + runId: string, + _parentRunId?: string, + _tags?: string[], + metadata?: Record, + runName?: string, + ) { + // Skip tool spans when inside an agent context (createReactAgent). + // Tool spans are created by wrapToolsWithSpans with richer attributes. + if (metadata?.__sentry_langgraph__) { + return; + } + + // runName is set to tool.name by LangChain's StructuredTool.call() + const toolName = runName || tool.name || 'unknown_tool'; const attributes: Record = { + ...getAgentAttributesFromMetadata(metadata), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', [GEN_AI_TOOL_NAME_ATTRIBUTE]: toolName, }; - // Add input if recordInputs is enabled if (recordInputs) { attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE] = input; } @@ -287,10 +321,13 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): handleToolEnd(output: unknown, runId: string) { const span = spanMap.get(runId); if (span?.isRecording()) { - // Add output if recordOutputs is enabled if (recordOutputs) { + // LangChain tools may return ToolMessage objects — extract the content + const outputObj = output as Record | undefined; + const content = + outputObj && typeof outputObj === 'object' && 'content' in outputObj ? 
outputObj.content : output; span.setAttributes({ - [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: JSON.stringify(output), + [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: typeof content === 'string' ? content : JSON.stringify(content), }); } exitSpan(runId); diff --git a/packages/core/src/tracing/langchain/types.ts b/packages/core/src/tracing/langchain/types.ts index 1c066269aba5..0fe98b76e092 100644 --- a/packages/core/src/tracing/langchain/types.ts +++ b/packages/core/src/tracing/langchain/types.ts @@ -36,6 +36,14 @@ export interface LangChainSerialized { kwargs?: Record; } +/** + * Subset of the 'llm' param passed to createReactAgent + */ +export interface BaseChatModel { + lc_namespace: string[]; + modelName: string; +} + /** * LangChain message structure * Supports both regular messages and LangChain serialized format diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 1227889f210d..d03e8463d3c8 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -1,7 +1,10 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import type { SpanAttributeValue } from '../../types-hoist/span'; import { + GEN_AI_AGENT_NAME_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_PIPELINE_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, @@ -350,22 +353,28 @@ export function extractChatModelRequestAttributes( } /** - * Scans generations for Anthropic-style `tool_use` items and records them. - * - * LangChain represents some provider messages (e.g., Anthropic) with a `message.content` - * array that may include objects `{ type: 'tool_use', ... }`. We collect and attach - * them as a JSON array on `gen_ai.response.tool_calls` for downstream consumers. + * Extracts tool calls from generations and records them on the span attributes. 
+ * Prefers message.tool_calls (LangChain's normalized format). Falls back to + * scanning message.content for Anthropic-style tool_use items in older versions + * where tool_calls may not be populated. */ function addToolCallsAttributes(generations: LangChainMessage[][], attrs: Record): void { const toolCalls: unknown[] = []; const flatGenerations = generations.flat(); for (const gen of flatGenerations) { - const content = gen.message?.content; - if (Array.isArray(content)) { - for (const item of content) { - const t = item as { type: string }; - if (t.type === 'tool_use') toolCalls.push(t); + const msg = gen.message as Record | undefined; + const msgToolCalls = msg?.tool_calls as unknown[] | undefined; + if (Array.isArray(msgToolCalls) && msgToolCalls.length > 0) { + toolCalls.push(...msgToolCalls); + } else { + // Fallback for older LangChain versions: scan message.content for Anthropic-style tool_use + const content = gen.message?.content; + if (Array.isArray(content)) { + for (const item of content) { + const t = item as Record; + if (t.type === 'tool_use') toolCalls.push(t); + } } } } @@ -504,3 +513,30 @@ export function extractLlmResponseAttributes( return attrs; } + +export function getAgentAttributesFromMetadata(metadata?: Record): Record { + const attrs: Record = {}; + // lc_agent_name is injected by instrumentCompiledGraphInvoke (langgraph integration) + const agentName = metadata?.lc_agent_name; + if (typeof agentName === 'string') { + attrs[GEN_AI_AGENT_NAME_ATTRIBUTE] = agentName; + attrs[GEN_AI_PIPELINE_NAME_ATTRIBUTE] = agentName; + } + return attrs; +} + +export function extractToolDefinitions(extraParams?: Record): string | undefined { + const tools = + (extraParams?.invocation_params as Record)?.tools ?? 
+ (extraParams?.options as Record)?.tools; + if (!Array.isArray(tools) || tools.length === 0) return undefined; + const toolDefs = tools.map((tool: Record) => { + const fn = tool.function as Record | undefined; + return { + type: 'function', + name: tool.name ?? fn?.name ?? '', + description: tool.description ?? fn?.description, + }; + }); + return JSON.stringify(toolDefs); +} diff --git a/packages/core/src/tracing/langgraph/index.ts b/packages/core/src/tracing/langgraph/index.ts index d188fe90d97f..e7cede784e37 100644 --- a/packages/core/src/tracing/langgraph/index.ts +++ b/packages/core/src/tracing/langgraph/index.ts @@ -10,6 +10,7 @@ import { GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_PIPELINE_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { @@ -19,12 +20,21 @@ import { resolveAIRecordingOptions, shouldEnableTruncation, } from '../ai/utils'; -import type { LangChainMessage } from '../langchain/types'; +import { createLangChainCallbackHandler } from '../langchain'; +import type { BaseChatModel, LangChainMessage } from '../langchain/types'; import { normalizeLangChainMessages } from '../langchain/utils'; import { startSpan } from '../trace'; import { LANGGRAPH_ORIGIN } from './constants'; import type { CompiledGraph, LangGraphOptions } from './types'; -import { extractToolsFromCompiledGraph, setResponseAttributes } from './utils'; +import { + extractAgentNameFromParams, + extractLLMFromParams, + extractToolsFromCompiledGraph, + setResponseAttributes, + wrapToolsWithSpans, +} from './utils'; + +let _insideCreateReactAgent = false; /** * Instruments StateGraph's compile method to create spans for agent creation and invocation @@ -40,6 +50,11 @@ export function instrumentStateGraphCompile( ): (...args: unknown[]) => CompiledGraph { return new Proxy(originalCompile, { apply(target, thisArg, args: unknown[]): CompiledGraph { + // Skip when called 
from within createReactAgent to avoid duplicate instrumentation + if (_insideCreateReactAgent) { + return Reflect.apply(target, thisArg, args); + } + return startSpan( { op: 'gen_ai.create_agent', @@ -99,9 +114,12 @@ function instrumentCompiledGraphInvoke( graphInstance: CompiledGraph, compileOptions: Record, options: LangGraphOptions, + llm?: BaseChatModel | null, + sentryCallbackHandler?: unknown, ): (...args: unknown[]) => Promise { return new Proxy(originalInvoke, { apply(target, thisArg, args: unknown[]): Promise { + const modelName = llm?.modelName; return startSpan( { op: 'gen_ai.invoke_agent', @@ -122,6 +140,10 @@ function instrumentCompiledGraphInvoke( span.updateName(`invoke_agent ${graphName}`); } + if (modelName) { + span.setAttribute(GEN_AI_REQUEST_MODEL_ATTRIBUTE, modelName); + } + // Extract thread_id from the config (second argument) // LangGraph uses config.configurable.thread_id for conversation/session linking const config = args.length > 1 ? (args[1] as Record | undefined) : undefined; @@ -131,6 +153,26 @@ function instrumentCompiledGraphInvoke( span.setAttribute(GEN_AI_CONVERSATION_ID_ATTRIBUTE, threadId); } + // Inject callback handler and agent name into invoke config + if (sentryCallbackHandler) { + const invokeConfig = (args[1] ?? {}) as Record; + args[1] = invokeConfig; + + const existingMetadata = (invokeConfig.metadata ?? {}) as Record; + invokeConfig.metadata = { + ...existingMetadata, + __sentry_langgraph__: true, + ...(typeof graphName === 'string' ? 
{ lc_agent_name: graphName } : {}), + }; + + const existingCallbacks = invokeConfig.callbacks as unknown[] | undefined; + if (!existingCallbacks) { + invokeConfig.callbacks = [sentryCallbackHandler]; + } else if (Array.isArray(existingCallbacks) && !existingCallbacks.includes(sentryCallbackHandler)) { + invokeConfig.callbacks = [...existingCallbacks, sentryCallbackHandler]; + } + } + // Extract available tools from the graph instance const tools = extractToolsFromCompiledGraph(graphInstance); if (tools) { @@ -164,7 +206,6 @@ function instrumentCompiledGraphInvoke( // Call original invoke const result = await Reflect.apply(target, thisArg, args); - // Set response attributes if (recordOutputs) { setResponseAttributes(span, inputMessages ?? null, result); } @@ -186,6 +227,59 @@ function instrumentCompiledGraphInvoke( }) as (...args: unknown[]) => Promise; } +/** + * Instruments createReactAgent to create invoke_agent and execute_tool spans. + */ +export function instrumentCreateReactAgent( + originalCreateReactAgent: (...args: unknown[]) => CompiledGraph, + options?: LangGraphOptions, +): (...args: unknown[]) => CompiledGraph { + const resolvedOptions = resolveAIRecordingOptions(options); + const sentryHandler = createLangChainCallbackHandler(resolvedOptions); + + return new Proxy(originalCreateReactAgent, { + apply(target, thisArg, args: unknown[]): CompiledGraph { + const llm = extractLLMFromParams(args); + const agentName = extractAgentNameFromParams(args); + + // Wrap tools with execute_tool spans (direct access gives us name, type, description) + const params = args[0] as Record | undefined; + if (params && Array.isArray(params.tools) && params.tools.length > 0) { + wrapToolsWithSpans(params.tools, resolvedOptions, agentName ?? 
undefined); + } + + // Suppress StateGraph.compile instrumentation inside createReactAgent + _insideCreateReactAgent = true; + let compiledGraph: CompiledGraph; + try { + compiledGraph = Reflect.apply(target, thisArg, args); + } finally { + _insideCreateReactAgent = false; + } + + // Wrap invoke() on the returned compiled graph + const originalInvoke = compiledGraph.invoke; + if (originalInvoke && typeof originalInvoke === 'function') { + const compileOptions: Record = {}; + if (agentName) { + compileOptions.name = agentName; + } + + compiledGraph.invoke = instrumentCompiledGraphInvoke( + originalInvoke.bind(compiledGraph) as (...args: unknown[]) => Promise, + compiledGraph, + compileOptions, + resolvedOptions, + llm, + sentryHandler, + ) as typeof originalInvoke; + } + + return compiledGraph; + }, + }) as (...args: unknown[]) => CompiledGraph; +} + /** * Directly instruments a StateGraph instance to add tracing spans * diff --git a/packages/core/src/tracing/langgraph/utils.ts b/packages/core/src/tracing/langgraph/utils.ts index 4b1990058924..e532dcaa7136 100644 --- a/packages/core/src/tracing/langgraph/utils.ts +++ b/packages/core/src/tracing/langgraph/utils.ts @@ -1,16 +1,167 @@ -import type { Span } from '../../types-hoist/span'; +import { captureException } from '../../exports'; +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; +import { SPAN_STATUS_ERROR } from '../../tracing'; +import type { Span, SpanAttributes } from '../../types-hoist/span'; import { + GEN_AI_AGENT_NAME_ATTRIBUTE, + GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PIPELINE_NAME_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_INPUT_ATTRIBUTE, + GEN_AI_TOOL_OUTPUT_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + 
GEN_AI_TOOL_TYPE_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import type { LangChainMessage } from '../langchain/types'; +import type { BaseChatModel, LangChainMessage } from '../langchain/types'; import { normalizeLangChainMessages } from '../langchain/utils'; -import type { CompiledGraph, LangGraphTool } from './types'; +import { startSpan } from '../trace'; +import { LANGGRAPH_ORIGIN } from './constants'; +import type { CompiledGraph, LangGraphOptions, LangGraphTool } from './types'; + +/** + * Extract LLM model object from createReactAgent params + */ +export function extractLLMFromParams(args: unknown[]): BaseChatModel | null { + const arg = args[0]; + return typeof arg === 'object' && + !!arg && + 'llm' in arg && + !!arg.llm && + typeof arg.llm === 'object' && + typeof (arg.llm as BaseChatModel).modelName === 'string' + ? (arg.llm as BaseChatModel) + : null; +} + +/** + * Extract agent name from createReactAgent params + */ +export function extractAgentNameFromParams(args: unknown[]): string | null { + const arg = args[0]; + if (typeof arg === 'object' && !!arg && 'name' in arg && typeof arg.name === 'string') { + return arg.name; + } + return null; +} + +/** + * Wraps an array of LangChain tools so each invocation creates a gen_ai.execute_tool span. + * + * Wraps each tool's invoke() method in place. A marker prevents double-wrapping. + */ +export function wrapToolsWithSpans(tools: unknown[], options: LangGraphOptions, agentName?: string): unknown[] { + const SENTRY_WRAPPED = '__sentry_tool_wrapped__'; + + for (const tool of tools) { + if (!tool || typeof tool !== 'object') { + continue; + } + + const t = tool as Record; + const originalInvoke = t.invoke; + if (typeof originalInvoke !== 'function' || Object.prototype.hasOwnProperty.call(t, SENTRY_WRAPPED)) { + continue; + } + + const toolName = typeof t.name === 'string' ? 
t.name : 'unknown_tool'; + const toolDescription = typeof t.description === 'string' ? t.description : undefined; + + const wrappedInvoke = new Proxy(originalInvoke as (...args: unknown[]) => unknown, { + apply(target, thisArg, args: unknown[]): unknown { + const spanAttributes: SpanAttributes = { + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGGRAPH_ORIGIN, + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: toolName, + [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', + }; + + // Read agent name from LangChain's propagated config metadata at call time, + // so shared tools get the correct agent name for each invocation + const callConfig = args[1] as Record | undefined; + const callAgentName = (callConfig?.metadata as Record)?.lc_agent_name ?? agentName; + if (typeof callAgentName === 'string') { + spanAttributes[GEN_AI_AGENT_NAME_ATTRIBUTE] = callAgentName; + spanAttributes[GEN_AI_PIPELINE_NAME_ATTRIBUTE] = callAgentName; + } + + if (toolDescription) { + spanAttributes[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE] = toolDescription; + } + + // LangGraph ToolNode passes { name, args, id, type: "tool_call" } + const input = args[0] as Record | undefined; + if (typeof input === 'object' && !!input) { + if ('id' in input && typeof input.id === 'string') { + spanAttributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE] = input.id; + } + + if (options.recordInputs) { + const toolArgs = 'args' in input && typeof input.args === 'object' ? 
input.args : input; + try { + spanAttributes[GEN_AI_TOOL_INPUT_ATTRIBUTE] = JSON.stringify(toolArgs); + } catch { + // skip if not serializable + } + } + } + + return startSpan( + { + op: GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE, + name: `execute_tool ${toolName}`, + attributes: spanAttributes, + }, + async span => { + try { + const result = await Reflect.apply(target, thisArg, args); + + if (options.recordOutputs) { + try { + // ToolMessage objects wrap the result in .content + const resultObj = result as Record<string, unknown> | undefined; + const content = + resultObj && typeof resultObj === 'object' && 'content' in resultObj ? resultObj.content : result; + span.setAttribute( + GEN_AI_TOOL_OUTPUT_ATTRIBUTE, + typeof content === 'string' ? content : JSON.stringify(content), + ); + } catch { + // skip if not serializable + } + } + + return result; + } catch (error) { + span.setStatus({ code: SPAN_STATUS_ERROR, message: 'internal_error' }); + captureException(error, { + mechanism: { + handled: false, + type: 'auto.ai.langgraph.error', + }, + }); + throw error; + } + }, + ); + }, + }); + + t.invoke = wrappedInvoke; + Object.defineProperty(t, SENTRY_WRAPPED, { value: true, enumerable: false }); + } + + return tools; +} /** * Extract tool calls from messages diff --git a/packages/core/test/lib/utils/langgraph-utils.test.ts b/packages/core/test/lib/utils/langgraph-utils.test.ts new file mode 100644 index 000000000000..7d953f6797b0 --- /dev/null +++ b/packages/core/test/lib/utils/langgraph-utils.test.ts @@ -0,0 +1,33 @@ +import { describe, expect, it } from 'vitest'; +import { extractAgentNameFromParams, extractLLMFromParams } from '../../../src/tracing/langgraph/utils'; + +describe('extractLLMFromParams', () => { + it('returns null for empty or invalid args', () => { + expect(extractLLMFromParams([])).toBe(null); + expect(extractLLMFromParams([null])).toBe(null); + expect(extractLLMFromParams([{}])).toBe(null); + expect(extractLLMFromParams([{ llm: false }])).toBe(null); +
expect(extractLLMFromParams([{ llm: 123 }])).toBe(null); + expect(extractLLMFromParams([{ llm: {} }])).toBe(null); + }); + + it('extracts llm object with modelName', () => { + expect(extractLLMFromParams([{ llm: { modelName: 'gpt-4o-mini', lc_namespace: ['langchain'] } }])).toStrictEqual({ + modelName: 'gpt-4o-mini', + lc_namespace: ['langchain'], + }); + }); +}); + +describe('extractAgentNameFromParams', () => { + it('returns null for empty or invalid args', () => { + expect(extractAgentNameFromParams([])).toBe(null); + expect(extractAgentNameFromParams([null])).toBe(null); + expect(extractAgentNameFromParams([{}])).toBe(null); + expect(extractAgentNameFromParams([{ name: 123 }])).toBe(null); + }); + + it('extracts agent name from params', () => { + expect(extractAgentNameFromParams([{ name: 'my_agent' }])).toBe('my_agent'); + }); +}); diff --git a/packages/node/src/integrations/tracing/langgraph/instrumentation.ts b/packages/node/src/integrations/tracing/langgraph/instrumentation.ts index d43765206b61..f1e87e1c8c4c 100644 --- a/packages/node/src/integrations/tracing/langgraph/instrumentation.ts +++ b/packages/node/src/integrations/tracing/langgraph/instrumentation.ts @@ -5,8 +5,8 @@ import { InstrumentationNodeModuleDefinition, InstrumentationNodeModuleFile, } from '@opentelemetry/instrumentation'; -import type { LangGraphOptions } from '@sentry/core'; -import { instrumentLangGraph, SDK_VERSION } from '@sentry/core'; +import type { CompiledGraph, LangGraphOptions } from '@sentry/core'; +import { getClient, instrumentCreateReactAgent, instrumentLangGraph, SDK_VERSION } from '@sentry/core'; const supportedVersions = ['>=0.0.0 <2.0.0']; @@ -18,6 +18,7 @@ type LangGraphInstrumentationOptions = InstrumentationConfig & LangGraphOptions; interface PatchedModuleExports { [key: string]: unknown; StateGraph?: abstract new (...args: unknown[]) => unknown; + createReactAgent?: (...args: unknown[]) => CompiledGraph; } /** @@ -31,40 +32,85 @@ export class 
SentryLangGraphInstrumentation extends InstrumentationBase exports, - [ - new InstrumentationNodeModuleFile( - /** - * In CJS, LangGraph packages re-export from dist/index.cjs files. - * Patching only the root module sometimes misses the real implementation or - * gets overwritten when that file is loaded. We add a file-level patch so that - * _patch runs again on the concrete implementation - */ - '@langchain/langgraph/dist/index.cjs', - supportedVersions, - this._patch.bind(this), - exports => exports, - ), - ], - ); - return module; + public init(): InstrumentationModuleDefinition[] { + return [ + new InstrumentationNodeModuleDefinition( + '@langchain/langgraph', + supportedVersions, + this._patch.bind(this), + exports => exports, + [ + new InstrumentationNodeModuleFile( + /** + * In CJS, LangGraph packages re-export from dist/index.cjs files. + * Patching only the root module sometimes misses the real implementation or + * gets overwritten when that file is loaded. We add a file-level patch so that + * _patch runs again on the concrete implementation + */ + '@langchain/langgraph/dist/index.cjs', + supportedVersions, + this._patch.bind(this), + exports => exports, + ), + new InstrumentationNodeModuleFile( + /** + * In CJS, the prebuilt submodule re-exports from dist/prebuilt/index.cjs. + * We add a file-level patch under the main module so that CJS require() + * of @langchain/langgraph/prebuilt gets patched. + */ + '@langchain/langgraph/dist/prebuilt/index.cjs', + supportedVersions, + this._patch.bind(this), + exports => exports, + ), + ], + ), + new InstrumentationNodeModuleDefinition( + '@langchain/langgraph/prebuilt', + supportedVersions, + this._patch.bind(this), + exports => exports, + [ + new InstrumentationNodeModuleFile( + /** + * In CJS, the prebuilt submodule re-exports from dist/prebuilt/index.cjs. + * We add file-level patches so _patch runs on the concrete implementation. 
+ */ + '@langchain/langgraph/dist/prebuilt/index.cjs', + supportedVersions, + this._patch.bind(this), + exports => exports, + ), + ], + ), + ]; } /** * Core patch logic applying instrumentation to the LangGraph module. */ private _patch(exports: PatchedModuleExports): PatchedModuleExports | void { + const client = getClient(); + const options = { + ...this.getConfig(), + recordInputs: this.getConfig().recordInputs ?? client?.getOptions().sendDefaultPii, + recordOutputs: this.getConfig().recordOutputs ?? client?.getOptions().sendDefaultPii, + }; + // Patch StateGraph.compile to instrument both compile() and invoke() if (exports.StateGraph && typeof exports.StateGraph === 'function') { - instrumentLangGraph( - exports.StateGraph.prototype as { compile: (...args: unknown[]) => unknown }, - this.getConfig(), - ); + instrumentLangGraph(exports.StateGraph.prototype as { compile: (...args: unknown[]) => unknown }, options); + } + + // Patch createReactAgent to instrument agent creation and invocation + if (exports.createReactAgent && typeof exports.createReactAgent === 'function') { + const originalCreateReactAgent = exports.createReactAgent; + Object.defineProperty(exports, 'createReactAgent', { + value: instrumentCreateReactAgent(originalCreateReactAgent as (...args: unknown[]) => CompiledGraph, options), + writable: true, + enumerable: true, + configurable: true, + }); } return exports;