Skip to content

Commit e2d8d45

Browse files
committed
feat(core): Instrument langgraph createReactAgent
This PR adds instrumentation for LangGraph's `createReactAgent` API. It: - extracts agent name, LLM model, and tools from params - wraps compiled graph's `invoke()` with invoke_agent span - wraps tool `invoke()` with execute_tool spans (name, type, description, arguments, result per our new conventions) - injects LangChain callback handler + `lc_agent_name` metadata at invoke level for chat span creation and agent name propagation to all child spans - suppresses `StateGraph.compile` instrumentation inside `createReactAgent` to avoid duplicate spans The LangChain callback handler was improved to: - read gen_ai.agent.name from `metadata.lc_agent_name` (this is a convention in newer versions of LangGraph createAgent so we took this over for our supported version) - suppress chain and tool callback spans inside agent context to avoid duplicates with our direct instrumentation (based on `metadata.lc_agent_name` presence) - extract tool definitions from `extraParams` in `handleChatModelStart` - support OpenAI tool calls via `message.tool_calls` (not just Anthropic content scanning) - normalize the tool call format and use our new convention attributes - use `runName` for the tool name (set by LangChain's StructuredTool) It exports: - `instrumentCreateReactAgent` from core, browser, cloudflare - New constants: - GEN_AI_TOOL_CALL_ARGUMENTS_ATTRIBUTE, - GEN_AI_TOOL_CALL_RESULT_ATTRIBUTE Closes: #19372
1 parent 50438f9 commit e2d8d45

File tree

15 files changed

+768
-58
lines changed

15 files changed

+768
-58
lines changed
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
import { ChatAnthropic } from '@langchain/anthropic';
2+
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
3+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
4+
import * as Sentry from '@sentry/node';
5+
import express from 'express';
6+
7+
/**
 * Boots a throwaway Express server that mimics Anthropic's `/v1/messages`
 * endpoint so the scenario runs fully offline.
 *
 * Every request receives the same canned "end_turn" assistant reply, echoing
 * back the model the client requested plus fixed token-usage numbers.
 *
 * @returns {Promise<import('http').Server>} the listening server (random port)
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    res.json({
      id: 'msg_react_agent_123',
      type: 'message',
      role: 'assistant',
      content: [{ type: 'text', text: 'Paris is the capital of France.' }],
      // Echo the requested model back, like the real API does.
      model: req.body.model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: { input_tokens: 20, output_tokens: 10 },
    });
  });

  // Port 0 asks the OS for any free port; resolve once the socket is bound.
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
40+
41+
/**
 * Scenario entry point: starts the mock Anthropic server, runs a tool-less
 * React agent inside a root "main" span, then flushes Sentry and shuts down.
 */
async function run() {
  const server = await startMockAnthropicServer();

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const llm = new ChatAnthropic({
      model: 'claude-3-5-sonnet-20241022',
      apiKey: 'mock-api-key',
      // Point the Anthropic SDK at the local mock instead of the real API.
      clientOptions: { baseURL: `http://localhost:${server.address().port}` },
    });

    const agent = createReactAgent({ llm, tools: [], name: 'helpful_assistant' });

    await agent.invoke({
      messages: [
        new SystemMessage('You are a helpful assistant.'),
        new HumanMessage('What is the capital of France?'),
      ],
    });
  });

  // Make sure buffered events reach the transport before the process exits.
  await Sentry.flush(2000);
  server.close();
}

run();
Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
import { tool } from '@langchain/core/tools';
2+
import { ChatAnthropic } from '@langchain/anthropic';
3+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
4+
import { HumanMessage } from '@langchain/core/messages';
5+
import * as Sentry from '@sentry/node';
6+
import express from 'express';
7+
import { z } from 'zod';
8+
9+
// Counts requests to the mock endpoint so we can script a multi-turn
// tool-calling conversation: tool call -> tool call -> final answer.
let callCount = 0;

/**
 * Boots a local Express server faking Anthropic's `/v1/messages` endpoint
 * with a scripted three-step conversation:
 *   1st request  -> tool_use "add" with {a: 3, b: 5}
 *   2nd request  -> tool_use "multiply" with {a: 8, b: 4}
 *   3rd+ request -> final text answer "The result is 32."
 *
 * @returns {Promise<import('http').Server>} the listening server (random port)
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    callCount++;
    const model = req.body.model;

    if (callCount === 1) {
      // First call: model decides to call the "add" tool
      res.json({
        id: 'msg_1',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'tool_use', id: 'toolu_add_1', name: 'add', input: { a: 3, b: 5 } }],
        model: model,
        stop_reason: 'tool_use',
        usage: { input_tokens: 20, output_tokens: 10 },
      });
    } else if (callCount === 2) {
      // Second call: model sees add result=8, calls "multiply"
      res.json({
        id: 'msg_2',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'tool_use', id: 'toolu_mul_1', name: 'multiply', input: { a: 8, b: 4 } }],
        model: model,
        stop_reason: 'tool_use',
        usage: { input_tokens: 30, output_tokens: 10 },
      });
    } else {
      // Third (and any later) call: model returns the final answer
      res.json({
        id: 'msg_3',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'text', text: 'The result is 32.' }],
        model: model,
        stop_reason: 'end_turn',
        usage: { input_tokens: 40, output_tokens: 10 },
      });
    }
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
73+
74+
/**
 * Scenario entry point: wires two math tools into a React agent and asks it
 * to compute (3 + 5) * 4 against the scripted mock server above.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const llm = new ChatAnthropic({
      model: 'claude-3-5-sonnet-20241022',
      apiKey: 'mock-api-key',
      clientOptions: { baseURL: baseUrl },
    });

    // Two trivial calculator tools. Both return their result as a string,
    // which is what LangChain tool implementations are expected to produce.
    const addTool = tool(async ({ a, b }) => String(a + b), {
      name: 'add',
      description: 'Add two numbers',
      schema: z.object({ a: z.number(), b: z.number() }),
    });

    const multiplyTool = tool(async ({ a, b }) => String(a * b), {
      name: 'multiply',
      description: 'Multiply two numbers',
      schema: z.object({ a: z.number(), b: z.number() }),
    });

    const agent = createReactAgent({
      llm,
      tools: [addTool, multiplyTool],
      name: 'math_assistant',
    });

    await agent.invoke({ messages: [new HumanMessage('Calculate (3 + 5) * 4')] });
  });

  // Flush buffered telemetry before tearing the mock server down.
  await Sentry.flush(2000);
  server.close();
}

run();
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import * as Sentry from '@sentry/node';
2+
import { loggingTransport } from '@sentry-internal/node-integration-tests';
3+
4+
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  // Drop transactions generated by the mock express server itself so the
  // integration-test assertions only see the scenario's "main" transaction.
  beforeSendTransaction: event => {
    return event.transaction?.includes('/v1/messages') ? null : event;
  },
});

dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -445,4 +445,88 @@ describe('LangGraph integration', () => {
445445
});
446446
},
447447
);
448+
449+
// createReactAgent tests
//
// Expected shape of the transaction emitted by agent-scenario.mjs: the
// createReactAgent instrumentation produces an invoke_agent span, and the
// chat span created for the LLM call inherits the agent name. Note that
// this API intentionally emits NO create_agent span.
const EXPECTED_TRANSACTION_REACT_AGENT = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // invoke_agent span (no create_agent span expected)
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
        [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
        [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant',
        // Pipeline name mirrors the agent name for createReactAgent runs.
        [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'helpful_assistant',
      }),
      description: 'invoke_agent helpful_assistant',
      op: 'gen_ai.invoke_agent',
      origin: 'auto.ai.langgraph',
      status: 'ok',
    }),
    // chat span (from Anthropic integration) should be a child with inherited agent name
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant',
      }),
      op: 'gen_ai.chat',
    }),
  ]),
};

createEsmAndCjsTests(__dirname, 'agent-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => {
  test('should instrument createReactAgent with agent and chat spans', { timeout: 30000 }, async () => {
    // `.ignore('event')` skips error events; we only assert on the transaction.
    await createRunner()
      .ignore('event')
      .expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT })
      .start()
      .completed();
  });
});
486+
487+
// createReactAgent with tools - verifies tool execution spans
//
// agent-tools-scenario.mjs scripts a three-turn conversation (add ->
// multiply -> answer), so we expect one invoke_agent span plus one
// execute_tool span per tool invocation.
const EXPECTED_TRANSACTION_REACT_AGENT_TOOLS = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // invoke_agent span
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'math_assistant',
      }),
      op: 'gen_ai.invoke_agent',
      status: 'ok',
    }),
    // execute_tool span for "add"
    // NOTE(review): 'gen_ai.tool.name' is a string literal here while the
    // other keys use exported constants — confirm whether a
    // GEN_AI_TOOL_NAME_ATTRIBUTE constant is available in this file's imports.
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
        'gen_ai.tool.name': 'add',
      }),
      description: 'execute_tool add',
      op: 'gen_ai.execute_tool',
      status: 'ok',
    }),
    // execute_tool span for "multiply"
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
        'gen_ai.tool.name': 'multiply',
      }),
      description: 'execute_tool multiply',
      op: 'gen_ai.execute_tool',
      status: 'ok',
    }),
  ]),
};

createEsmAndCjsTests(__dirname, 'agent-tools-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => {
  test('should create tool execution spans for createReactAgent with tools', { timeout: 30000 }, async () => {
    await createRunner()
      .ignore('event')
      .expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT_TOOLS })
      .start()
      .completed();
  });
});
448532
});

packages/browser/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ export {
7272
instrumentOpenAiClient,
7373
instrumentGoogleGenAIClient,
7474
instrumentLangGraph,
75+
instrumentCreateReactAgent,
7576
createLangChainCallbackHandler,
7677
instrumentLangChainEmbeddings,
7778
logger,

packages/cloudflare/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ export {
110110
withStreamedSpan,
111111
spanStreamingIntegration,
112112
instrumentLangGraph,
113+
instrumentCreateReactAgent,
113114
} from '@sentry/core';
114115

115116
export { withSentry } from './withSentry';

packages/core/src/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ export type { GoogleGenAIResponse } from './tracing/google-genai/types';
178178
export { createLangChainCallbackHandler, instrumentLangChainEmbeddings } from './tracing/langchain';
179179
export { LANGCHAIN_INTEGRATION_NAME } from './tracing/langchain/constants';
180180
export type { LangChainOptions, LangChainIntegration } from './tracing/langchain/types';
181-
export { instrumentStateGraphCompile, instrumentLangGraph } from './tracing/langgraph';
181+
export { instrumentStateGraphCompile, instrumentCreateReactAgent, instrumentLangGraph } from './tracing/langgraph';
182182
export { LANGGRAPH_INTEGRATION_NAME } from './tracing/langgraph/constants';
183183
export type { LangGraphOptions, LangGraphIntegration, CompiledGraph } from './tracing/langgraph/types';
184184
export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './tracing/openai/types';

packages/core/src/tracing/ai/gen-ai-attributes.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -262,3 +262,15 @@ export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output';
262262
* @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-description
263263
*/
264264
export const GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE = 'gen_ai.tool.description';
265+
266+
/**
 * The arguments passed to a tool call, serialized as a JSON string.
 *
 * Preferred over the older `gen_ai.tool.input` attribute.
 */
export const GEN_AI_TOOL_CALL_ARGUMENTS_ATTRIBUTE = 'gen_ai.tool.call.arguments';
271+
272+
/**
 * The result returned by a tool call.
 *
 * Preferred over the older `gen_ai.tool.output` attribute.
 */
export const GEN_AI_TOOL_CALL_RESULT_ATTRIBUTE = 'gen_ai.tool.call.result';

0 commit comments

Comments
 (0)