2222
2323import sentry_sdk
2424from sentry_sdk import start_transaction
25+ from sentry_sdk .utils import package_version
2526from sentry_sdk .integrations .langchain import (
2627 LangchainIntegration ,
2728 SentryLangchainCallback ,
3233try :
3334 # langchain v1+
3435 from langchain .tools import tool
36+ from langchain .agents import create_agent
3537 from langchain_classic .agents import AgentExecutor , create_openai_tools_agent # type: ignore[import-not-found]
3638except ImportError :
3739 # langchain <v1
3840 from langchain .agents import tool , AgentExecutor , create_openai_tools_agent
3941
4042from langchain_core .prompts import ChatPromptTemplate , MessagesPlaceholder
41-
43+ from langchain_core . messages import HumanMessage , SystemMessage
4244
4345from openai .types .chat .chat_completion_chunk import (
4446 ChatCompletionChunk ,
5254 CompletionUsage ,
5355)
5456
57+ LANGCHAIN_VERSION = package_version ("langchain" )
58+
5559
5660@tool
5761def get_word_length (word : str ) -> int :
@@ -79,6 +83,129 @@ def _llm_type(self) -> str:
7983 return llm_type
8084
8185
@pytest.mark.skipif(
    # package_version() returns None when langchain is missing or its version
    # cannot be parsed; `None < (1,)` would raise TypeError, so guard it.
    LANGCHAIN_VERSION is None or LANGCHAIN_VERSION < (1,),
    reason="LangChain 1.0+ required (ONE AGENT refactor)",
)
@pytest.mark.parametrize(
    "send_default_pii, include_prompts",
    [
        (True, True),
        (True, False),
        (False, True),
        (False, False),
    ],
)
@pytest.mark.parametrize(
    "system_instructions_content",
    [
        "You are very powerful assistant, but don't know current events",
        [
            {"type": "text", "text": "You are a helpful assistant."},
            {"type": "text", "text": "Be concise and clear."},
        ],
    ],
    ids=["string", "blocks"],
)
def test_langchain_create_agent(
    sentry_init,
    capture_events,
    send_default_pii,
    include_prompts,
    system_instructions_content,
    request,
    get_model_response,
    nonstreaming_responses_model_response,
):
    """Exercise the LangChain v1 ``create_agent`` path of the integration.

    Asserts that a single ``gen_ai.chat`` span is emitted with token-usage
    data, and that prompt/response content (system instructions, request
    messages, response text) is attached only when BOTH ``send_default_pii``
    and ``include_prompts`` are enabled.
    """
    sentry_init(
        integrations=[
            LangchainIntegration(
                include_prompts=include_prompts,
            )
        ],
        traces_sample_rate=1.0,
        send_default_pii=send_default_pii,
    )
    events = capture_events()

    model_response = get_model_response(
        nonstreaming_responses_model_response,
        serialize_pydantic=True,
        request_headers={
            "X-Stainless-Raw-Response": "True",
        },
    )

    # Deliberately bad API key: the HTTP layer is patched below, so no real
    # request is ever sent.
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key="badkey",
        use_responses_api=True,
    )
    agent = create_agent(
        model=llm,
        tools=[get_word_length],
        system_prompt=SystemMessage(content=system_instructions_content),
        name="word_length_agent",
    )

    # Patch the underlying httpx transport so the canned model response is
    # returned instead of hitting the OpenAI API.  (Dropped the unused
    # ``as _`` binding — the context manager value is never read.)
    with patch.object(
        llm.client._client._client,
        "send",
        return_value=model_response,
    ):
        with start_transaction():
            agent.invoke(
                {
                    "messages": [
                        HumanMessage(content="How many letters in the word eudca"),
                    ],
                },
            )

    tx = events[0]
    assert tx["type"] == "transaction"
    assert tx["contexts"]["trace"]["origin"] == "manual"

    chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")
    assert len(chat_spans) == 1
    assert chat_spans[0]["origin"] == "auto.ai.langchain"

    # Token usage must be reported regardless of PII settings.
    assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10
    assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20
    assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30

    if send_default_pii and include_prompts:
        assert (
            chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
            == "Hello, how can I help you?"
        )

        # Distinguish the "string" vs "blocks" parametrization of the
        # system prompt via the pytest param id.
        param_id = request.node.callspec.id
        if "string" in param_id:
            assert [
                {
                    "type": "text",
                    "content": "You are very powerful assistant, but don't know current events",
                }
            ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS])
        else:
            assert [
                {
                    "type": "text",
                    "content": "You are a helpful assistant.",
                },
                {
                    "type": "text",
                    "content": "Be concise and clear.",
                },
            ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS])
    else:
        # Without PII consent, no prompt/response content may leak onto spans.
        assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get("data", {})
        assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {})
        assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {})
207+
208+
82209@pytest .mark .parametrize (
83210 "send_default_pii, include_prompts" ,
84211 [
@@ -100,7 +227,7 @@ def _llm_type(self) -> str:
100227 ],
101228 ids = ["string" , "list" , "blocks" ],
102229)
103- def test_langchain_agent (
230+ def test_langchain_openai_tools_agent (
104231 sentry_init ,
105232 capture_events ,
106233 send_default_pii ,
0 commit comments