2727 SentryLangchainCallback ,
2828 _transform_langchain_content_block ,
2929 _transform_langchain_message_content ,
30+ _push_agent ,
31+ _pop_agent ,
3032)
3133
3234try :
@@ -851,12 +853,15 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
851853 assert tx ["type" ] == "transaction"
852854
853855 llm_spans = [
854- span for span in tx .get ("spans" , []) if span .get ("op" ) == "gen_ai.pipeline"
856+ span
857+ for span in tx .get ("spans" , [])
858+ if span .get ("op" ) == "gen_ai.generate_text"
855859 ]
856860 assert len (llm_spans ) > 0
857861
858862 llm_span = llm_spans [0 ]
859- assert llm_span ["description" ] == "Langchain LLM call"
863+ assert llm_span ["description" ] == "generate_text gpt-3.5-turbo"
864+ assert llm_span ["data" ]["gen_ai.operation.name" ] == "generate_text"
860865 assert llm_span ["data" ]["gen_ai.request.model" ] == "gpt-3.5-turbo"
861866 assert (
862867 llm_span ["data" ]["gen_ai.response.text" ]
@@ -867,6 +872,110 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
867872 assert llm_span ["data" ]["gen_ai.usage.output_tokens" ] == 15
868873
869874
def test_langchain_llm_span_includes_agent_name(sentry_init, capture_events):
    """An LLM span emitted while an agent is active carries GEN_AI_AGENT_NAME.

    Drives SentryLangchainCallback directly (AgentExecutor patched out so the
    integration works with langchain-core only) and checks that pushing an
    agent via _push_agent tags the resulting gen_ai.generate_text span.
    """
    from langchain_core.outputs import LLMResult, Generation

    # Simulate a langchain-core-only environment: no AgentExecutor available.
    with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
        from sentry_sdk.integrations.langchain import (
            LangchainIntegration,
            SentryLangchainCallback,
        )

        sentry_init(
            integrations=[LangchainIntegration(include_prompts=True)],
            traces_sample_rate=1.0,
            send_default_pii=True,
        )
        events = capture_events()

        callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

        run_id = "12345678-1234-1234-1234-123456789abc"
        serialized = {"_type": "openai", "model_name": "gpt-3.5-turbo"}
        prompts = ["Hello"]

        with start_transaction():
            # Mark an agent as active for the duration of the LLM call;
            # the finally block guarantees the agent stack is unwound even
            # if the callback raises.
            _push_agent("test-agent")
            try:
                callback.on_llm_start(
                    serialized=serialized,
                    prompts=prompts,
                    run_id=run_id,
                    invocation_params={"model": "gpt-3.5-turbo"},
                )
                fake_response = LLMResult(
                    generations=[[Generation(text="Hi")]],
                    llm_output={},
                )
                callback.on_llm_end(response=fake_response, run_id=run_id)
            finally:
                _pop_agent()

        assert len(events) > 0
        tx = events[0]

        generate_text_spans = [
            s for s in tx.get("spans", []) if s.get("op") == "gen_ai.generate_text"
        ]
        assert len(generate_text_spans) == 1

        span = generate_text_spans[0]
        assert span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test-agent"
928+
def test_langchain_llm_span_no_agent_name_when_no_agent(sentry_init, capture_events):
    """Without an active agent, the LLM span must not carry GEN_AI_AGENT_NAME.

    Counterpart to the agent-name test: the same callback flow is exercised,
    but no agent is pushed, so the gen_ai.generate_text span's data must not
    contain the agent-name attribute.
    """
    from langchain_core.outputs import LLMResult, Generation

    # Simulate a langchain-core-only environment: no AgentExecutor available.
    with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
        from sentry_sdk.integrations.langchain import (
            LangchainIntegration,
            SentryLangchainCallback,
        )

        sentry_init(
            integrations=[LangchainIntegration(include_prompts=True)],
            traces_sample_rate=1.0,
            send_default_pii=True,
        )
        events = capture_events()

        callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

        run_id = "12345678-1234-1234-1234-123456789def"
        serialized = {"_type": "openai", "model_name": "gpt-3.5-turbo"}
        prompts = ["Hello"]

        with start_transaction():
            callback.on_llm_start(
                serialized=serialized,
                prompts=prompts,
                run_id=run_id,
                invocation_params={"model": "gpt-3.5-turbo"},
            )
            fake_response = LLMResult(
                generations=[[Generation(text="Hi")]],
                llm_output={},
            )
            callback.on_llm_end(response=fake_response, run_id=run_id)

        assert len(events) > 0
        tx = events[0]

        generate_text_spans = [
            s for s in tx.get("spans", []) if s.get("op") == "gen_ai.generate_text"
        ]
        assert len(generate_text_spans) == 1

        span = generate_text_spans[0]
        assert SPANDATA.GEN_AI_AGENT_NAME not in span["data"]
978+
870979def test_langchain_message_role_mapping (sentry_init , capture_events ):
871980 """Test that message roles are properly normalized in langchain integration."""
872981 global llm_type
@@ -1062,11 +1171,12 @@ def test_langchain_message_truncation(sentry_init, capture_events):
10621171 assert tx ["type" ] == "transaction"
10631172
10641173 llm_spans = [
1065- span for span in tx .get ("spans" , []) if span .get ("op" ) == "gen_ai.pipeline "
1174+ span for span in tx .get ("spans" , []) if span .get ("op" ) == "gen_ai.generate_text "
10661175 ]
10671176 assert len (llm_spans ) > 0
10681177
10691178 llm_span = llm_spans [0 ]
1179+ assert llm_span ["data" ]["gen_ai.operation.name" ] == "generate_text"
10701180 assert SPANDATA .GEN_AI_REQUEST_MESSAGES in llm_span ["data" ]
10711181
10721182 messages_data = llm_span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]
@@ -1776,11 +1886,12 @@ def test_langchain_response_model_extraction(
17761886 assert tx ["type" ] == "transaction"
17771887
17781888 llm_spans = [
1779- span for span in tx .get ("spans" , []) if span .get ("op" ) == "gen_ai.pipeline "
1889+ span for span in tx .get ("spans" , []) if span .get ("op" ) == "gen_ai.generate_text "
17801890 ]
17811891 assert len (llm_spans ) > 0
17821892
17831893 llm_span = llm_spans [0 ]
1894+ assert llm_span ["data" ]["gen_ai.operation.name" ] == "generate_text"
17841895
17851896 if expected_model is not None :
17861897 assert SPANDATA .GEN_AI_RESPONSE_MODEL in llm_span ["data" ]
0 commit comments