3838 from langchain .agents import tool , AgentExecutor , create_openai_tools_agent
3939
4040from langchain_core .prompts import ChatPromptTemplate , MessagesPlaceholder
41-
41+ from langchain .agents import create_agent
42+ from langchain_core .messages import HumanMessage , SystemMessage
4243
4344from openai .types .chat .chat_completion_chunk import (
4445 ChatCompletionChunk ,
@@ -81,6 +82,128 @@ def _llm_type(self) -> str:
8182 return llm_type
8283
8384
@pytest.mark.parametrize(
    "send_default_pii, include_prompts",
    [
        (True, True),
        (True, False),
        (False, True),
        (False, False),
    ],
)
@pytest.mark.parametrize(
    "system_instructions_content",
    [
        "You are very powerful assistant, but don't know current events",
        [
            {"type": "text", "text": "You are a helpful assistant."},
            {"type": "text", "text": "Be concise and clear."},
        ],
    ],
    ids=["string", "blocks"],
)
def test_langchain_create_agent(
    sentry_init,
    capture_events,
    send_default_pii,
    include_prompts,
    system_instructions_content,
    request,
    get_model_response,
    nonstreaming_responses_model_response,
):
    """Verify the Langchain integration instruments agents built via ``create_agent``.

    Runs an agent with a mocked (non-streaming, Responses-API) model response and
    asserts that exactly one ``gen_ai.chat`` span is emitted, that token usage is
    recorded when available, and that prompts / system instructions / response
    text appear in span data only when both ``send_default_pii`` and
    ``include_prompts`` are enabled.
    """
    sentry_init(
        integrations=[
            LangchainIntegration(
                include_prompts=include_prompts,
            )
        ],
        traces_sample_rate=1.0,
        send_default_pii=send_default_pii,
    )
    events = capture_events()

    # Serialized pydantic response with the raw-response header so the client
    # treats it as a real HTTP payload.
    model_response = get_model_response(
        nonstreaming_responses_model_response,
        serialize_pydantic=True,
        request_headers={
            "X-Stainless-Raw-Response": "True",
        },
    )

    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key="badkey",  # never hits the network; transport is patched below
        use_responses_api=True,
    )
    agent = create_agent(
        model=llm,
        tools=[get_word_length],
        system_prompt=SystemMessage(content=system_instructions_content),
        name="word_length_agent",
    )

    # Patch the underlying httpx transport so the canned response is returned
    # instead of performing a real API call.
    with patch.object(
        llm.client._client._client,
        "send",
        return_value=model_response,
    ):
        with start_transaction():
            agent.invoke(
                {
                    "messages": [
                        HumanMessage(content="How many letters in the word eudca"),
                    ],
                },
            )

    tx = events[0]
    assert tx["type"] == "transaction"
    assert tx["contexts"]["trace"]["origin"] == "manual"

    chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")
    assert len(chat_spans) == 1
    assert chat_spans[0]["origin"] == "auto.ai.langchain"

    # Token usage is only available in newer versions of langchain (v0.2+)
    # where usage_metadata is supported on AIMessageChunk
    if "gen_ai.usage.input_tokens" in chat_spans[0]["data"]:
        assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10
        assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20
        assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30

    if send_default_pii and include_prompts:
        assert (
            chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
            == "Hello, how can I help you?"
        )

        # Distinguish the two system-prompt parametrizations by their pytest id.
        param_id = request.node.callspec.id
        if "string" in param_id:
            assert [
                {
                    "type": "text",
                    "content": "You are very powerful assistant, but don't know current events",
                }
            ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS])
        else:
            assert [
                {
                    "type": "text",
                    "content": "You are a helpful assistant.",
                },
                {
                    "type": "text",
                    "content": "Be concise and clear.",
                },
            ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS])
    else:
        # Without PII consent, no prompt/response content may leak into span data.
        assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get("data", {})
        assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {})
        assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {})
84207@pytest .mark .parametrize (
85208 "send_default_pii, include_prompts, use_unknown_llm_type" ,
86209 [
@@ -102,7 +225,7 @@ def _llm_type(self) -> str:
102225 ],
103226 ids = ["string" , "list" , "blocks" ],
104227)
105- def test_langchain_agent (
228+ def test_langchain_openai_tools_agent (
106229 sentry_init ,
107230 capture_events ,
108231 send_default_pii ,
0 commit comments