@@ -211,7 +211,7 @@ def test_langchain_create_agent(
211211 )
212212 events = capture_events ()
213213
214- model_repsonse = get_model_response (
214+ model_response = get_model_response (
215215 nonstreaming_responses_model_response ,
216216 serialize_pydantic = True ,
217217 request_headers = {
@@ -235,7 +235,7 @@ def test_langchain_create_agent(
235235 with patch .object (
236236 llm .client ._client ._client ,
237237 "send" ,
238- return_value = model_repsonse ,
238+ return_value = model_response ,
239239 ) as _ :
240240 with start_transaction ():
241241 agent .invoke (
@@ -254,12 +254,9 @@ def test_langchain_create_agent(
254254 assert len (chat_spans ) == 1
255255 assert chat_spans [0 ]["origin" ] == "auto.ai.langchain"
256256
257- # Token usage is only available in newer versions of langchain (v0.2+)
258- # where usage_metadata is supported on AIMessageChunk
259- if "gen_ai.usage.input_tokens" in chat_spans [0 ]["data" ]:
260- assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 10
261- assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 20
262- assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 30
257+ assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 10
258+ assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 20
259+ assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 30
263260
264261 if send_default_pii and include_prompts :
265262 assert (
@@ -411,17 +408,13 @@ def test_tool_execution_span(
411408 assert chat_spans [1 ]["origin" ] == "auto.ai.langchain"
412409 assert tool_exec_span ["origin" ] == "auto.ai.langchain"
413410
414- # Token usage is only available in newer versions of langchain (v0.2+)
415- # where usage_metadata is supported on AIMessageChunk
416- if "gen_ai.usage.input_tokens" in chat_spans [0 ]["data" ]:
417- assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 142
418- assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 50
419- assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 192
411+ assert chat_spans [0 ]["data" ]["gen_ai.usage.input_tokens" ] == 142
412+ assert chat_spans [0 ]["data" ]["gen_ai.usage.output_tokens" ] == 50
413+ assert chat_spans [0 ]["data" ]["gen_ai.usage.total_tokens" ] == 192
420414
421- if "gen_ai.usage.input_tokens" in chat_spans [1 ]["data" ]:
422- assert chat_spans [1 ]["data" ]["gen_ai.usage.input_tokens" ] == 89
423- assert chat_spans [1 ]["data" ]["gen_ai.usage.output_tokens" ] == 28
424- assert chat_spans [1 ]["data" ]["gen_ai.usage.total_tokens" ] == 117
415+ assert chat_spans [1 ]["data" ]["gen_ai.usage.input_tokens" ] == 89
416+ assert chat_spans [1 ]["data" ]["gen_ai.usage.output_tokens" ] == 28
417+ assert chat_spans [1 ]["data" ]["gen_ai.usage.total_tokens" ] == 117
425418
426419 if send_default_pii and include_prompts :
427420 assert "word" in tool_exec_span ["data" ][SPANDATA .GEN_AI_TOOL_INPUT ]