Skip to content

Commit d4c4cd4

Browse files
fix(langchain): Set agent name as gen_ai.agent.name
1 parent 9193eb0 commit d4c4cd4

File tree

2 files changed

+66
-94
lines changed

2 files changed

+66
-94
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 60 additions & 94 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import contextvars
21
import itertools
32
import sys
43
import json
@@ -153,44 +152,6 @@ def _transform_langchain_message_content(content: "Any") -> "Any":
153152
return content
154153

155154

156-
# Contextvar to track agent names in a stack for re-entrant agent support
157-
_agent_stack: "contextvars.ContextVar[Optional[List[Optional[str]]]]" = (
158-
contextvars.ContextVar("langchain_agent_stack", default=None)
159-
)
160-
161-
162-
def _push_agent(agent_name: "Optional[str]") -> None:
163-
"""Push an agent name onto the stack."""
164-
stack = _agent_stack.get()
165-
if stack is None:
166-
stack = []
167-
else:
168-
# Copy the list to maintain contextvar isolation across async contexts
169-
stack = stack.copy()
170-
stack.append(agent_name)
171-
_agent_stack.set(stack)
172-
173-
174-
def _pop_agent() -> "Optional[str]":
175-
"""Pop an agent name from the stack and return it."""
176-
stack = _agent_stack.get()
177-
if stack:
178-
# Copy the list to maintain contextvar isolation across async contexts
179-
stack = stack.copy()
180-
agent_name = stack.pop()
181-
_agent_stack.set(stack)
182-
return agent_name
183-
return None
184-
185-
186-
def _get_current_agent() -> "Optional[str]":
187-
"""Get the current agent name (top of stack) without removing it."""
188-
stack = _agent_stack.get()
189-
if stack:
190-
return stack[-1]
191-
return None
192-
193-
194155
def _get_system_instructions(messages: "List[List[BaseMessage]]") -> "List[str]":
195156
system_instructions = []
196157

@@ -454,8 +415,8 @@ def on_chat_model_start(
454415
elif "openai" in ai_type:
455416
span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
456417

457-
agent_name = _get_current_agent()
458-
if agent_name:
418+
agent_name = kwargs.get("metadata", {}).get("lc_agent_name")
419+
if agent_name is not None:
459420
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
460421

461422
for key, attribute in DATA_FIELDS.items():
@@ -654,8 +615,8 @@ def on_tool_start(
654615
if tool_description is not None:
655616
span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description)
656617

657-
agent_name = _get_current_agent()
658-
if agent_name:
618+
agent_name = kwargs.get("metadata", {}).get("lc_agent_name")
619+
if agent_name is not None:
659620
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
660621

661622
if should_send_default_pii() and self.include_prompts:
@@ -782,9 +743,7 @@ def _record_token_usage(span: "Span", response: "Any") -> None:
782743
span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
783744

784745

785-
def _get_request_data(
786-
obj: "Any", args: "Any", kwargs: "Any"
787-
) -> "tuple[Optional[str], Optional[List[Any]]]":
746+
def _get_available_tools(obj: "Any") -> "tuple[Optional[str], Optional[List[Any]]]":
788747
"""
789748
Get the agent name and available tools for the agent.
790749
"""
@@ -799,6 +758,13 @@ def _get_request_data(
799758
)
800759
tools = tools if tools and len(tools) > 0 else None
801760

761+
return tools
762+
763+
764+
def _get_run_name(obj: "Any", args: "Any"):
765+
agent = getattr(obj, "agent", None)
766+
runnable = getattr(agent, "runnable", None)
767+
runnable_config = getattr(runnable, "config", {})
802768
try:
803769
agent_name = None
804770
if len(args) > 1:
@@ -808,7 +774,7 @@ def _get_request_data(
808774
except Exception:
809775
pass
810776

811-
return (agent_name, tools)
777+
return agent_name
812778

813779

814780
def _simplify_langchain_tools(tools: "Any") -> "Optional[List[Any]]":
@@ -976,58 +942,53 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
976942
if integration is None:
977943
return f(self, *args, **kwargs)
978944

979-
agent_name, tools = _get_request_data(self, args, kwargs)
980945
start_span_function = get_start_span_function()
981-
946+
run_name = _get_run_name(self, args)
982947
with start_span_function(
983948
op=OP.GEN_AI_INVOKE_AGENT,
984-
name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
949+
name=run_name,
985950
origin=LangchainIntegration.origin,
986951
) as span:
987-
_push_agent(agent_name)
988-
try:
989-
if agent_name:
990-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
952+
if run_name:
953+
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)
991954

992-
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
993-
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
955+
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
956+
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
994957

995-
_set_tools_on_span(span, tools)
958+
tools = _get_available_tools(self)
959+
_set_tools_on_span(span, tools)
996960

997-
# Run the agent
998-
result = f(self, *args, **kwargs)
961+
# Run the agent
962+
result = f(self, *args, **kwargs)
999963

1000-
input = result.get("input")
1001-
if (
1002-
input is not None
1003-
and should_send_default_pii()
1004-
and integration.include_prompts
1005-
):
1006-
normalized_messages = normalize_message_roles([input])
1007-
scope = sentry_sdk.get_current_scope()
1008-
messages_data = truncate_and_annotate_messages(
1009-
normalized_messages, span, scope
964+
input = result.get("input")
965+
if (
966+
input is not None
967+
and should_send_default_pii()
968+
and integration.include_prompts
969+
):
970+
normalized_messages = normalize_message_roles([input])
971+
scope = sentry_sdk.get_current_scope()
972+
messages_data = truncate_and_annotate_messages(
973+
normalized_messages, span, scope
974+
)
975+
if messages_data is not None:
976+
set_data_normalized(
977+
span,
978+
SPANDATA.GEN_AI_REQUEST_MESSAGES,
979+
messages_data,
980+
unpack=False,
1010981
)
1011-
if messages_data is not None:
1012-
set_data_normalized(
1013-
span,
1014-
SPANDATA.GEN_AI_REQUEST_MESSAGES,
1015-
messages_data,
1016-
unpack=False,
1017-
)
1018982

1019-
output = result.get("output")
1020-
if (
1021-
output is not None
1022-
and should_send_default_pii()
1023-
and integration.include_prompts
1024-
):
1025-
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
983+
output = result.get("output")
984+
if (
985+
output is not None
986+
and should_send_default_pii()
987+
and integration.include_prompts
988+
):
989+
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
1026990

1027-
return result
1028-
finally:
1029-
# Ensure agent is popped even if an exception occurs
1030-
_pop_agent()
991+
return result
1031992

1032993
return new_invoke
1033994

@@ -1039,24 +1000,31 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
10391000
if integration is None:
10401001
return f(self, *args, **kwargs)
10411002

1042-
agent_name, tools = _get_request_data(self, args, kwargs)
10431003
start_span_function = get_start_span_function()
10441004

1005+
agent_name = kwargs.get("metadata", {}).get("lc_agent_name")
1006+
run_name = _get_run_name(self, args)
1007+
1008+
span_name = "invoke_agent"
1009+
if agent_name is not None:
1010+
span_name = f"invoke_agent {agent_name}"
1011+
elif run_name:
1012+
span_name = f"invoke_agent {run_name}"
1013+
10451014
span = start_span_function(
10461015
op=OP.GEN_AI_INVOKE_AGENT,
1047-
name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
1016+
name=span_name,
10481017
origin=LangchainIntegration.origin,
10491018
)
10501019
span.__enter__()
10511020

1052-
_push_agent(agent_name)
1053-
1054-
if agent_name:
1021+
if agent_name is not None:
10551022
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
10561023

10571024
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
10581025
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
10591026

1027+
tools = _get_available_tools(self)
10601028
_set_tools_on_span(span, tools)
10611029

10621030
input = args[0].get("input") if len(args) >= 1 else None
@@ -1106,7 +1074,6 @@ def new_iterator() -> "Iterator[Any]":
11061074
raise
11071075
finally:
11081076
# Ensure cleanup happens even if iterator is abandoned or fails
1109-
_pop_agent()
11101077
span.__exit__(*exc_info)
11111078

11121079
async def new_iterator_async() -> "AsyncIterator[Any]":
@@ -1132,7 +1099,6 @@ async def new_iterator_async() -> "AsyncIterator[Any]":
11321099
raise
11331100
finally:
11341101
# Ensure cleanup happens even if iterator is abandoned or fails
1135-
_pop_agent()
11361102
span.__exit__(*exc_info)
11371103

11381104
if str(type(result)) == "<class 'async_generator'>":

tests/integrations/langchain/test_langchain.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -254,6 +254,8 @@ def test_langchain_create_agent(
254254
assert len(chat_spans) == 1
255255
assert chat_spans[0]["origin"] == "auto.ai.langchain"
256256

257+
assert chat_spans[0]["data"]["gen_ai.agent.name"] == "word_length_agent"
258+
257259
assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10
258260
assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20
259261
assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30
@@ -410,6 +412,10 @@ def test_tool_execution_span(
410412
assert chat_spans[1]["origin"] == "auto.ai.langchain"
411413
assert tool_exec_span["origin"] == "auto.ai.langchain"
412414

415+
assert chat_spans[0]["data"]["gen_ai.agent.name"] == "word_length_agent"
416+
assert chat_spans[1]["data"]["gen_ai.agent.name"] == "word_length_agent"
417+
assert tool_exec_span["data"]["gen_ai.agent.name"] == "word_length_agent"
418+
413419
assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142
414420
assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50
415421
assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192

0 commit comments

Comments (0)