Skip to content

Commit a5d04d6

Browse files
authored
feat(langchain): Set gen_ai.operation.name and gen_ai.pipeline.name on LLM spans (#5849)
1 parent 72483bb commit a5d04d6

File tree

2 files changed

+13
-2
lines changed

2 files changed

+13
-2
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -383,6 +383,12 @@ def on_llm_start(
383383
)
384384
span = watched_span.span
385385

386+
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "generate_text")
387+
388+
pipeline_name = kwargs.get("name")
389+
if pipeline_name:
390+
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, pipeline_name)
391+
386392
if model:
387393
span.set_data(
388394
SPANDATA.GEN_AI_REQUEST_MODEL,

tests/integrations/langchain/test_langchain.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ def test_langchain_text_completion(
147147
) as _:
148148
with start_transaction():
149149
input_text = "What is the capital of France?"
150-
model.invoke(input_text)
150+
model.invoke(input_text, config={"run_name": "my-snazzy-pipeline"})
151151

152152
tx = events[0]
153153
assert tx["type"] == "transaction"
@@ -160,6 +160,7 @@ def test_langchain_text_completion(
160160
llm_span = llm_spans[0]
161161
assert llm_span["description"] == "generate_text gpt-3.5-turbo"
162162
assert llm_span["data"]["gen_ai.system"] == "openai"
163+
assert llm_span["data"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline"
163164
assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
164165
assert llm_span["data"]["gen_ai.response.text"] == "The capital of France is Paris."
165166
assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25
@@ -1268,6 +1269,7 @@ def test_langchain_message_truncation(sentry_init, capture_events):
12681269
serialized=serialized,
12691270
prompts=prompts,
12701271
run_id=run_id,
1272+
name="my_pipeline",
12711273
invocation_params={
12721274
"temperature": 0.7,
12731275
"max_tokens": 100,
@@ -1297,8 +1299,10 @@ def test_langchain_message_truncation(sentry_init, capture_events):
12971299
assert len(llm_spans) > 0
12981300

12991301
llm_span = llm_spans[0]
1300-
assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"]
1302+
assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"
1303+
assert llm_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "my_pipeline"
13011304

1305+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"]
13021306
messages_data = llm_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
13031307
assert isinstance(messages_data, str)
13041308

@@ -2011,6 +2015,7 @@ def test_langchain_response_model_extraction(
20112015
assert len(llm_spans) > 0
20122016

20132017
llm_span = llm_spans[0]
2018+
assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"
20142019

20152020
if expected_model is not None:
20162021
assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"]

0 commit comments

Comments (0)