@@ -159,6 +159,7 @@ def test_langchain_text_completion(
159159
160160 llm_span = llm_spans [0 ]
161161 assert llm_span ["description" ] == "generate_text gpt-3.5-turbo"
162+ assert llm_span ["data" ]["gen_ai.system" ] == "openai"
162163 assert llm_span ["data" ]["gen_ai.request.model" ] == "gpt-3.5-turbo"
163164 assert llm_span ["data" ]["gen_ai.response.text" ] == "The capital of France is Paris."
164165 assert llm_span ["data" ]["gen_ai.usage.total_tokens" ] == 25
@@ -254,6 +255,7 @@ def test_langchain_create_agent(
254255 assert len (chat_spans ) == 1
255256 assert chat_spans [0 ]["origin" ] == "auto.ai.langchain"
256257
258+ assert chat_spans [0 ]["data" ]["gen_ai.system" ] == "openai-chat"
257259 assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 10
258260 assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 20
259261 assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 30
@@ -413,10 +415,12 @@ def test_tool_execution_span(
413415 assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 142
414416 assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 50
415417 assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 192
418+ assert chat_spans [0 ]["data" ]["gen_ai.system" ] == "openai-chat"
416419
417420 assert chat_spans [1 ]["data" ]["gen_ai.usage.input_tokens" ] == 89
418421 assert chat_spans [1 ]["data" ]["gen_ai.usage.output_tokens" ] == 28
419422 assert chat_spans [1 ]["data" ]["gen_ai.usage.total_tokens" ] == 117
423+ assert chat_spans [1 ]["data" ]["gen_ai.system" ] == "openai-chat"
420424
421425 if send_default_pii and include_prompts :
422426 assert "word" in tool_exec_span ["data" ][SPANDATA .GEN_AI_TOOL_INPUT ]
@@ -2226,6 +2230,94 @@ def test_transform_google_file_data(self):
22262230 }
22272231
22282232
@pytest.mark.parametrize(
    "ai_type,expected_system",
    [
        # Real LangChain _type values (from _llm_type properties)
        # OpenAI
        ("openai-chat", "openai-chat"),
        ("openai", "openai"),
        # Azure OpenAI
        ("azure-openai-chat", "azure-openai-chat"),
        ("azure", "azure"),
        # Anthropic
        ("anthropic-chat", "anthropic-chat"),
        # Google
        ("vertexai", "vertexai"),
        ("chat-google-generative-ai", "chat-google-generative-ai"),
        ("google_gemini", "google_gemini"),
        # AWS Bedrock
        ("amazon_bedrock_chat", "amazon_bedrock_chat"),
        ("amazon_bedrock", "amazon_bedrock"),
        # Cohere
        ("cohere-chat", "cohere-chat"),
        # Ollama
        ("chat-ollama", "chat-ollama"),
        ("ollama-llm", "ollama-llm"),
        # Mistral
        ("mistralai-chat", "mistralai-chat"),
        # Fireworks
        ("fireworks-chat", "fireworks-chat"),
        ("fireworks", "fireworks"),
        # HuggingFace
        ("huggingface-chat-wrapper", "huggingface-chat-wrapper"),
        # Groq
        ("groq-chat", "groq-chat"),
        # NVIDIA
        ("chat-nvidia-ai-playground", "chat-nvidia-ai-playground"),
        # xAI
        ("xai-chat", "xai-chat"),
        # DeepSeek
        ("chat-deepseek", "chat-deepseek"),
        # Edge cases: an empty or missing _type must not set gen_ai.system
        ("", None),
        (None, None),
    ],
)
def test_langchain_ai_system_detection(
    sentry_init, capture_events, ai_type, expected_system
):
    """Check that the callback mirrors the provider's ``_type`` value into the
    ``gen_ai.system`` span attribute, and omits the attribute entirely when no
    ``_type`` is available (empty string or missing key)."""
    sentry_init(
        integrations=[LangchainIntegration()],
        traces_sample_rate=1.0,
    )
    events = capture_events()

    callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

    run_id = "test-ai-system-uuid"
    # ai_type=None models a serialized payload that lacks the "_type" key.
    serialized = {} if ai_type is None else {"_type": ai_type}

    with start_transaction():
        callback.on_llm_start(
            serialized=serialized,
            prompts=["Test prompt"],
            run_id=run_id,
            invocation_params={"_type": ai_type, "model": "test-model"},
        )
        # Close the LLM span with a minimal mocked response object.
        callback.on_llm_end(
            response=Mock(generations=[[Mock(text="Test response", message=None)]]),
            run_id=run_id,
        )

    assert len(events) > 0
    tx = events[0]
    assert tx["type"] == "transaction"

    llm_spans = [
        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.generate_text"
    ]
    assert len(llm_spans) > 0

    llm_span = llm_spans[0]

    if expected_system is None:
        assert SPANDATA.GEN_AI_SYSTEM not in llm_span.get("data", {})
    else:
        assert llm_span["data"][SPANDATA.GEN_AI_SYSTEM] == expected_system
2319+
2320+
22292321class TestTransformLangchainMessageContent :
22302322 """Tests for _transform_langchain_message_content function."""
22312323
0 commit comments