@@ -256,6 +256,7 @@ def test_langchain_create_agent(
256256 assert len (chat_spans ) == 1
257257 assert chat_spans [0 ]["origin" ] == "auto.ai.langchain"
258258
259+ assert chat_spans [0 ]["data" ]["gen_ai.system" ] == "openai-chat"
259260 assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 10
260261 assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 20
261262 assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 30
@@ -415,10 +416,12 @@ def test_tool_execution_span(
415416 assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 142
416417 assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 50
417418 assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 192
419+ assert chat_spans [0 ]["data" ]["gen_ai.system" ] == "openai-chat"
418420
419421 assert chat_spans [1 ]["data" ]["gen_ai.usage.input_tokens" ] == 89
420422 assert chat_spans [1 ]["data" ]["gen_ai.usage.output_tokens" ] == 28
421423 assert chat_spans [1 ]["data" ]["gen_ai.usage.total_tokens" ] == 117
424+ assert chat_spans [1 ]["data" ]["gen_ai.system" ] == "openai-chat"
422425
423426 if send_default_pii and include_prompts :
424427 assert "word" in tool_exec_span ["data" ][SPANDATA .GEN_AI_TOOL_INPUT ]
@@ -2232,6 +2235,94 @@ def test_transform_google_file_data(self):
22322235 }
22332236
22342237
@pytest.mark.parametrize(
    "ai_type,expected_system",
    [
        # Real LangChain _type values (from _llm_type properties)
        # OpenAI
        ("openai-chat", "openai-chat"),
        ("openai", "openai"),
        # Azure OpenAI
        ("azure-openai-chat", "azure-openai-chat"),
        ("azure", "azure"),
        # Anthropic
        ("anthropic-chat", "anthropic-chat"),
        # Google
        ("vertexai", "vertexai"),
        ("chat-google-generative-ai", "chat-google-generative-ai"),
        ("google_gemini", "google_gemini"),
        # AWS Bedrock
        ("amazon_bedrock_chat", "amazon_bedrock_chat"),
        ("amazon_bedrock", "amazon_bedrock"),
        # Cohere
        ("cohere-chat", "cohere-chat"),
        # Ollama
        ("chat-ollama", "chat-ollama"),
        ("ollama-llm", "ollama-llm"),
        # Mistral
        ("mistralai-chat", "mistralai-chat"),
        # Fireworks
        ("fireworks-chat", "fireworks-chat"),
        ("fireworks", "fireworks"),
        # HuggingFace
        ("huggingface-chat-wrapper", "huggingface-chat-wrapper"),
        # Groq
        ("groq-chat", "groq-chat"),
        # NVIDIA
        ("chat-nvidia-ai-playground", "chat-nvidia-ai-playground"),
        # xAI
        ("xai-chat", "xai-chat"),
        # DeepSeek
        ("chat-deepseek", "chat-deepseek"),
        # Edge cases: blank or absent _type should produce no system attribute
        ("", None),
        (None, None),
    ],
)
def test_langchain_ai_system_detection(
    sentry_init, capture_events, ai_type, expected_system
):
    """Check that ``gen_ai.system`` span data mirrors the model's ``_type``.

    Drives ``SentryLangchainCallback`` directly with a synthetic LLM
    start/end pair and asserts that the resulting ``gen_ai.generate_text``
    span carries the expected system value, or omits the attribute
    entirely for the empty-string / missing ``_type`` edge cases.
    """
    sentry_init(
        integrations=[LangchainIntegration()],
        traces_sample_rate=1.0,
    )
    events = capture_events()

    handler = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
    llm_run_id = "test-ai-system-uuid"

    # Only put "_type" into the serialized payload when a value exists,
    # mirroring a model that defines no _llm_type at all.
    model_serialized = {}
    if ai_type is not None:
        model_serialized["_type"] = ai_type

    with start_transaction():
        handler.on_llm_start(
            serialized=model_serialized,
            prompts=["Test prompt"],
            run_id=llm_run_id,
            invocation_params={"_type": ai_type, "model": "test-model"},
        )
        fake_generation = Mock(text="Test response", message=None)
        handler.on_llm_end(
            response=Mock(generations=[[fake_generation]]), run_id=llm_run_id
        )

    assert events
    transaction_event = events[0]
    assert transaction_event["type"] == "transaction"

    generate_spans = [
        s
        for s in transaction_event.get("spans", [])
        if s.get("op") == "gen_ai.generate_text"
    ]
    assert generate_spans

    span_data = generate_spans[0].get("data", {})
    if expected_system is None:
        assert SPANDATA.GEN_AI_SYSTEM not in span_data
    else:
        assert span_data[SPANDATA.GEN_AI_SYSTEM] == expected_system
2325+
22352326class TestTransformLangchainMessageContent :
22362327 """Tests for _transform_langchain_message_content function."""
22372328
0 commit comments