Skip to content

Commit 9193eb0

Browse files
test(langchain): Add text completion test (#5740)
Replace the test that invoked the callback hooks manually with a test that exercises the library directly, so that the same hooks are triggered through real usage.
1 parent f465307 commit 9193eb0

File tree

1 file changed

+76
-79
lines changed

1 file changed

+76
-79
lines changed

tests/integrations/langchain/test_langchain.py

Lines changed: 76 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,10 @@
99

1010
try:
1111
# Langchain >= 0.2
12-
from langchain_openai import ChatOpenAI
12+
from langchain_openai import ChatOpenAI, OpenAI
1313
except ImportError:
1414
# Langchain < 0.2
15+
from langchain_community.llms import OpenAI
1516
from langchain_community.chat_models import ChatOpenAI
1617

1718
from langchain_core.callbacks import BaseCallbackManager, CallbackManagerForLLMRun
@@ -50,6 +51,9 @@
5051
ChoiceDeltaToolCallFunction,
5152
)
5253

54+
from openai.types.completion import Completion
55+
from openai.types.completion_choice import CompletionChoice
56+
5357
from openai.types.completion_usage import (
5458
CompletionUsage,
5559
)
@@ -91,6 +95,77 @@ def _llm_type(self) -> str:
9195
return llm_type
9296

9397

def test_langchain_text_completion(
    sentry_init,
    capture_events,
    get_model_response,
):
    """End-to-end check that a plain text-completion call through the
    LangChain ``OpenAI`` LLM produces a ``gen_ai.generate_text`` span
    carrying the request model, response text, and token usage.
    """
    sentry_init(
        integrations=[
            LangchainIntegration(
                include_prompts=True,
            )
        ],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    captured = capture_events()

    # Canned OpenAI completion payload returned by the patched HTTP transport,
    # so no real network call is ever made.
    mocked_response = get_model_response(
        Completion(
            id="completion-id",
            object="text_completion",
            created=10000000,
            model="gpt-3.5-turbo",
            choices=[
                CompletionChoice(
                    index=0,
                    finish_reason="stop",
                    text="The capital of France is Paris.",
                )
            ],
            usage=CompletionUsage(
                prompt_tokens=10,
                completion_tokens=15,
                total_tokens=25,
            ),
        ),
        serialize_pydantic=True,
    )

    # The API key is deliberately bogus; the transport below is patched anyway.
    llm = OpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0.7,
        max_tokens=100,
        openai_api_key="badkey",
    )

    # Patch the innermost httpx client's send() so the invoke() call goes
    # through the full LangChain/OpenAI stack but receives our canned reply.
    with patch.object(
        llm.client._client._client,
        "send",
        return_value=mocked_response,
    ), start_transaction():
        prompt = "What is the capital of France?"
        llm.invoke(prompt)

    transaction_event = captured[0]
    assert transaction_event["type"] == "transaction"

    generation_spans = [
        s
        for s in transaction_event.get("spans", [])
        if s.get("op") == "gen_ai.generate_text"
    ]
    assert len(generation_spans) > 0

    span = generation_spans[0]
    assert span["description"] == "generate_text gpt-3.5-turbo"
    span_data = span["data"]
    assert span_data["gen_ai.request.model"] == "gpt-3.5-turbo"
    assert span_data["gen_ai.response.text"] == "The capital of France is Paris."
    assert span_data["gen_ai.usage.total_tokens"] == 25
    assert span_data["gen_ai.usage.input_tokens"] == 10
    assert span_data["gen_ai.usage.output_tokens"] == 15
94169
@pytest.mark.skipif(
95170
LANGCHAIN_VERSION < (1,),
96171
reason="LangChain 1.0+ required (ONE AGENT refactor)",
@@ -1018,84 +1093,6 @@ def test_langchain_callback_list_existing_callback(sentry_init):
10181093
assert handler is sentry_callback
10191094

10201095

def test_langchain_integration_with_langchain_core_only(sentry_init, capture_events):
    """Test that the langchain integration works when langchain.agents.AgentExecutor
    is not available or langchain is not installed, but langchain-core is.
    """

    from langchain_core.outputs import LLMResult, Generation

    # Simulate "langchain-core only": force AgentExecutor to be absent.
    with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
        from sentry_sdk.integrations.langchain import (
            LangchainIntegration,
            SentryLangchainCallback,
        )

        sentry_init(
            integrations=[LangchainIntegration(include_prompts=True)],
            traces_sample_rate=1.0,
            send_default_pii=True,
        )
        captured = capture_events()

        # setup_once() must tolerate the missing AgentExecutor.
        try:
            LangchainIntegration.setup_once()
        except Exception as e:
            pytest.fail(f"setup_once() failed when AgentExecutor is None: {e}")

        handler = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)

        llm_run_id = "12345678-1234-1234-1234-123456789012"
        serialized_llm = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}

        # Drive the callback hooks directly — start, then end with a result.
        with start_transaction():
            handler.on_llm_start(
                serialized=serialized_llm,
                prompts=["What is the capital of France?"],
                run_id=llm_run_id,
                invocation_params={
                    "temperature": 0.7,
                    "max_tokens": 100,
                    "model": "gpt-3.5-turbo",
                },
            )

            llm_result = LLMResult(
                generations=[[Generation(text="The capital of France is Paris.")]],
                llm_output={
                    "token_usage": {
                        "total_tokens": 25,
                        "prompt_tokens": 10,
                        "completion_tokens": 15,
                    }
                },
            )
            handler.on_llm_end(response=llm_result, run_id=llm_run_id)

        assert len(captured) > 0
        transaction_event = captured[0]
        assert transaction_event["type"] == "transaction"

        generation_spans = [
            s
            for s in transaction_event.get("spans", [])
            if s.get("op") == "gen_ai.generate_text"
        ]
        assert len(generation_spans) > 0

        span = generation_spans[0]
        assert span["description"] == "generate_text gpt-3.5-turbo"
        span_data = span["data"]
        assert span_data["gen_ai.request.model"] == "gpt-3.5-turbo"
        assert span_data["gen_ai.response.text"] == "The capital of France is Paris."
        assert span_data["gen_ai.usage.total_tokens"] == 25
        assert span_data["gen_ai.usage.input_tokens"] == 10
        assert span_data["gen_ai.usage.output_tokens"] == 15
10991096
def test_langchain_message_role_mapping(sentry_init, capture_events):
11001097
"""Test that message roles are properly normalized in langchain integration."""
11011098
global llm_type

0 commit comments

Comments
 (0)