Skip to content

Commit 6aa24b0

Browse files
committed
feat(core): Instrument langgraph createReactAgent
This PR adds instrumentation for LangGraph's `createReactAgent` API. It: - extracts agent name, LLM model, and tools from params - wraps compiled graph's `invoke()` with invoke_agent span - wraps tool `invoke()` with execute_tool spans (name, type, description, arguments, result per our new conventions) - injects LangChain callback handler + `lc_agent_name` metadata at invoke level for chat span creation and agent name propagation to all child spans - suppresses `StateGraph.compile` instrumentation inside `createReactAgent` to avoid duplicate spans The LangChain callback handler was improved to: - read gen_ai.agent.name from `metadata.lc_agent_name` (this is a convention in newer versions of LangGraph createAgent, so we adopted it for our supported version) - suppress chain and tool callback spans inside agent context to avoid duplicates with our direct instrumentation (based on `metadata.lc_agent_name` presence) - extract tool definitions from `extraParams` in `handleChatModelStart` - support OpenAI tool calls via `message.tool_calls` (not just Anthropic content scanning) - normalize the tool call format and use our new convention attributes - use `runName` for the tool name (set by LangChain's StructuredTool) It exports: - `instrumentCreateReactAgent` from core, browser, cloudflare - New constants: - GEN_AI_TOOL_CALL_ARGUMENTS_ATTRIBUTE, - GEN_AI_TOOL_CALL_RESULT_ATTRIBUTE Closes: #19372
1 parent 6a397a3 commit 6aa24b0

File tree

15 files changed

+762
-58
lines changed

15 files changed

+762
-58
lines changed
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
import { ChatAnthropic } from '@langchain/anthropic';
2+
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
3+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
4+
import * as Sentry from '@sentry/node';
5+
import express from 'express';
6+
7+
/**
 * Spins up an Express server that mimics the Anthropic Messages API.
 *
 * Every POST to /v1/messages answers with the same canned "end_turn"
 * assistant message, echoing back the requested model, so the scenario
 * never talks to the real API.
 *
 * @returns {Promise<import('http').Server>} resolves once the server is
 *   listening on an OS-assigned port.
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const { model } = req.body;

    // Fixed single-text-block response with deterministic token usage.
    res.json({
      id: 'msg_react_agent_123',
      type: 'message',
      role: 'assistant',
      content: [{ type: 'text', text: 'Paris is the capital of France.' }],
      model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: { input_tokens: 20, output_tokens: 10 },
    });
  });

  // Port 0 asks the OS for any free port; resolve with the live server.
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
40+
41+
/**
 * Runs the scenario: boots the mock Anthropic server, invokes a tool-less
 * createReactAgent inside a root span, then flushes Sentry and shuts the
 * mock server down.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const llm = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      const agent = createReactAgent({ llm, tools: [], name: 'helpful_assistant' });

      await agent.invoke({
        messages: [new SystemMessage('You are a helpful assistant.'), new HumanMessage('What is the capital of France?')],
      });
    });
  } finally {
    // Flush buffered spans and free the port even if the agent invocation throws.
    await Sentry.flush(2000);
    server.close();
  }
}

// Surface scenario failures instead of dying with an unhandled promise rejection.
run().catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
import { tool } from '@langchain/core/tools';
2+
import { ChatAnthropic } from '@langchain/anthropic';
3+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
4+
import { HumanMessage } from '@langchain/core/messages';
5+
import * as Sentry from '@sentry/node';
6+
import express from 'express';
7+
import { z } from 'zod';
8+
9+
// Counts LLM round-trips served so far; drives the 3-step scripted conversation.
let callCount = 0;

/**
 * Starts an Express server that fakes the Anthropic Messages API for a
 * scripted tool-calling conversation:
 *   call 1 -> tool_use "add"      with { a: 3, b: 5 }
 *   call 2 -> tool_use "multiply" with { a: 8, b: 4 }
 *   call 3+ -> final text answer "The result is 32."
 *
 * @returns {Promise<import('http').Server>} resolves once the server is
 *   listening on an OS-assigned port.
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    callCount++;
    const { model } = req.body;

    let payload;
    if (callCount === 1) {
      // First call: model decides to call the "add" tool
      payload = {
        id: 'msg_1',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'tool_use', id: 'toolu_add_1', name: 'add', input: { a: 3, b: 5 } }],
        model,
        stop_reason: 'tool_use',
        usage: { input_tokens: 20, output_tokens: 10 },
      };
    } else if (callCount === 2) {
      // Second call: model sees add result=8, calls "multiply"
      payload = {
        id: 'msg_2',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'tool_use', id: 'toolu_mul_1', name: 'multiply', input: { a: 8, b: 4 } }],
        model,
        stop_reason: 'tool_use',
        usage: { input_tokens: 30, output_tokens: 10 },
      };
    } else {
      // Third call: model returns final answer
      payload = {
        id: 'msg_3',
        type: 'message',
        role: 'assistant',
        content: [{ type: 'text', text: 'The result is 32.' }],
        model,
        stop_reason: 'end_turn',
        usage: { input_tokens: 40, output_tokens: 10 },
      };
    }

    res.json(payload);
  });

  // Port 0 asks the OS for any free port; resolve with the live server.
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}
73+
74+
/**
 * Runs the tools scenario: boots the mock Anthropic server, invokes a
 * createReactAgent equipped with "add" and "multiply" tools inside a root
 * span, then flushes Sentry and shuts the mock server down.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const llm = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        apiKey: 'mock-api-key',
        clientOptions: { baseURL: baseUrl },
      });

      const addTool = tool(
        async ({ a, b }) => {
          return String(a + b);
        },
        {
          name: 'add',
          description: 'Add two numbers',
          schema: z.object({ a: z.number(), b: z.number() }),
        },
      );

      const multiplyTool = tool(
        async ({ a, b }) => {
          return String(a * b);
        },
        {
          name: 'multiply',
          description: 'Multiply two numbers',
          schema: z.object({ a: z.number(), b: z.number() }),
        },
      );

      const agent = createReactAgent({
        llm,
        tools: [addTool, multiplyTool],
        name: 'math_assistant',
      });

      await agent.invoke({
        messages: [new HumanMessage('Calculate (3 + 5) * 4')],
      });
    });
  } finally {
    // Flush buffered spans and free the port even if the agent invocation throws.
    await Sentry.flush(2000);
    server.close();
  }
}

// Surface scenario failures instead of dying with an unhandled promise rejection.
run().catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import * as Sentry from '@sentry/node';
2+
import { loggingTransport } from '@sentry-internal/node-integration-tests';
3+
4+
// Full tracing with PII enabled, routed through the file-based logging
// transport used by the node integration-test runner.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  // Drop transactions produced by the mock Anthropic express server so that
  // only the scenario's own "main" transaction reaches the assertions.
  beforeSendTransaction: event => (event.transaction?.includes('/v1/messages') ? null : event),
});

dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -364,4 +364,88 @@ describe('LangGraph integration', () => {
364364
await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_RESUME }).start().completed();
365365
});
366366
});
367+
368+
// createReactAgent tests
// Expected span shape for a tool-less createReactAgent invocation: a single
// invoke_agent span carrying the agent/pipeline name, plus a gen_ai.chat span
// that inherited the agent name. Keys/values here are the test contract.
const EXPECTED_TRANSACTION_REACT_AGENT = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // invoke_agent span (no create_agent span expected)
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
        [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
        [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
        // Agent name from createReactAgent({ name }) is set as both the
        // agent name and the pipeline name.
        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant',
        [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'helpful_assistant',
      }),
      description: 'invoke_agent helpful_assistant',
      op: 'gen_ai.invoke_agent',
      origin: 'auto.ai.langgraph',
      status: 'ok',
    }),
    // chat span (from Anthropic integration) should be a child with inherited agent name
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'helpful_assistant',
      }),
      op: 'gen_ai.chat',
    }),
  ]),
};
395+
396+
createEsmAndCjsTests(__dirname, 'agent-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => {
397+
test('should instrument createReactAgent with agent and chat spans', { timeout: 30000 }, async () => {
398+
await createRunner()
399+
.ignore('event')
400+
.expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT })
401+
.start()
402+
.completed();
403+
});
404+
});
405+
406+
// createReactAgent with tools - verifies tool execution spans
// Expected span shape for a tool-equipped agent run: the invoke_agent span and
// one execute_tool span per scripted tool call ("add", then "multiply").
// NOTE(review): tool-name keys are spelled as the string literal
// 'gen_ai.tool.name' rather than an imported attribute constant — confirm
// whether a constant exists in this file's imports and should be used instead.
const EXPECTED_TRANSACTION_REACT_AGENT_TOOLS = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // invoke_agent span
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'math_assistant',
      }),
      op: 'gen_ai.invoke_agent',
      status: 'ok',
    }),
    // execute_tool span for "add"
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
        'gen_ai.tool.name': 'add',
      }),
      description: 'execute_tool add',
      op: 'gen_ai.execute_tool',
      status: 'ok',
    }),
    // execute_tool span for "multiply"
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
        'gen_ai.tool.name': 'multiply',
      }),
      description: 'execute_tool multiply',
      op: 'gen_ai.execute_tool',
      status: 'ok',
    }),
  ]),
};
441+
442+
createEsmAndCjsTests(__dirname, 'agent-tools-scenario.mjs', 'instrument-agent.mjs', (createRunner, test) => {
443+
test('should create tool execution spans for createReactAgent with tools', { timeout: 30000 }, async () => {
444+
await createRunner()
445+
.ignore('event')
446+
.expect({ transaction: EXPECTED_TRANSACTION_REACT_AGENT_TOOLS })
447+
.start()
448+
.completed();
449+
});
450+
});
367451
});

packages/browser/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,7 @@ export {
6969
instrumentOpenAiClient,
7070
instrumentGoogleGenAIClient,
7171
instrumentLangGraph,
72+
instrumentCreateReactAgent,
7273
createLangChainCallbackHandler,
7374
logger,
7475
} from '@sentry/core';

packages/cloudflare/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,7 @@ export {
107107
logger,
108108
metrics,
109109
instrumentLangGraph,
110+
instrumentCreateReactAgent,
110111
} from '@sentry/core';
111112

112113
export { withSentry } from './withSentry';

packages/core/src/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ export type { GoogleGenAIResponse } from './tracing/google-genai/types';
171171
export { createLangChainCallbackHandler } from './tracing/langchain';
172172
export { LANGCHAIN_INTEGRATION_NAME } from './tracing/langchain/constants';
173173
export type { LangChainOptions, LangChainIntegration } from './tracing/langchain/types';
174-
export { instrumentStateGraphCompile, instrumentLangGraph } from './tracing/langgraph';
174+
export { instrumentStateGraphCompile, instrumentCreateReactAgent, instrumentLangGraph } from './tracing/langgraph';
175175
export { LANGGRAPH_INTEGRATION_NAME } from './tracing/langgraph/constants';
176176
export type { LangGraphOptions, LangGraphIntegration, CompiledGraph } from './tracing/langgraph/types';
177177
export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './tracing/openai/types';

packages/core/src/tracing/ai/gen-ai-attributes.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -282,3 +282,15 @@ export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output';
282282
* @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-description
283283
*/
284284
export const GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE = 'gen_ai.tool.description';
285+
286+
/**
 * The arguments passed to a tool call, serialized as a JSON string.
 * Preferred over gen_ai.tool.input
 */
export const GEN_AI_TOOL_CALL_ARGUMENTS_ATTRIBUTE = 'gen_ai.tool.call.arguments';

/**
 * The value returned by a tool call (stringified if not already a string).
 * Preferred over gen_ai.tool.output
 */
export const GEN_AI_TOOL_CALL_RESULT_ATTRIBUTE = 'gen_ai.tool.call.result';

0 commit comments

Comments
 (0)