Skip to content

Commit df300a9

Browse files
test(langchain): Add text completion test
1 parent a728bd0 commit df300a9

File tree

1 file changed

+76
-78
lines changed

1 file changed

+76
-78
lines changed

tests/integrations/langchain/test_langchain.py

Lines changed: 76 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,10 @@
99

1010
try:
1111
# Langchain >= 0.2
12-
from langchain_openai import ChatOpenAI
12+
from langchain_openai import ChatOpenAI, OpenAI
1313
except ImportError:
1414
# Langchain < 0.2
15-
from langchain_community.chat_models import ChatOpenAI
15+
from langchain_community.chat_models import ChatOpenAI, OpenAI
1616

1717
from langchain_core.callbacks import BaseCallbackManager, CallbackManagerForLLMRun
1818
from langchain_core.messages import BaseMessage, AIMessageChunk
@@ -50,6 +50,9 @@
5050
ChoiceDeltaToolCallFunction,
5151
)
5252

53+
from openai.types.completion import Completion
54+
from openai.types.completion_choice import CompletionChoice
55+
5356
from openai.types.completion_usage import (
5457
CompletionUsage,
5558
)
@@ -91,6 +94,77 @@ def _llm_type(self) -> str:
9194
return llm_type
9295

9396

def test_langchain_text_completion(
    sentry_init,
    capture_events,
    get_model_response,
):
    """Verify the Sentry LangChain integration instruments plain text
    (non-chat) completions made through ``OpenAI``.

    The OpenAI HTTP transport is patched so no network call happens; a
    canned ``Completion`` is returned instead.  The test then asserts
    that a ``gen_ai.pipeline`` span is emitted carrying the request
    model, the response text, and the token-usage counters.
    """
    sentry_init(
        integrations=[
            LangchainIntegration(
                include_prompts=True,
            )
        ],
        traces_sample_rate=1.0,
        send_default_pii=True,  # needed so prompts/responses are recorded
    )
    events = capture_events()

    # Canned OpenAI text-completion payload (fixed typo: model_response).
    model_response = get_model_response(
        Completion(
            id="completion-id",
            object="text_completion",
            created=10000000,
            model="gpt-3.5-turbo",
            choices=[
                CompletionChoice(
                    index=0,
                    finish_reason="stop",
                    text="The capital of France is Paris.",
                )
            ],
            usage=CompletionUsage(
                prompt_tokens=10,
                completion_tokens=15,
                total_tokens=25,
            ),
        ),
        serialize_pydantic=True,
    )

    model = OpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0.7,
        max_tokens=100,
        openai_api_key="badkey",  # never used: transport is patched below
    )

    # Patch the underlying httpx client's send() so the canned response
    # is returned without any network traffic.
    with patch.object(
        model.client._client._client,
        "send",
        return_value=model_response,
    ) as _:
        with start_transaction():
            input_text = "What is the capital of France?"
            model.invoke(input_text)

    tx = events[0]
    assert tx["type"] == "transaction"

    llm_spans = [
        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
    ]
    assert len(llm_spans) > 0

    llm_span = llm_spans[0]
    assert llm_span["description"] == "Langchain LLM call"
    assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
    assert llm_span["data"]["gen_ai.response.text"] == "The capital of France is Paris."
    assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25
    assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10
    assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15
94168
@pytest.mark.skipif(
95169
LANGCHAIN_VERSION < (1,),
96170
reason="LangChain 1.0+ required (ONE AGENT refactor)",
@@ -1026,82 +1100,6 @@ def test_langchain_callback_list_existing_callback(sentry_init):
10261100
assert handler is sentry_callback
10271101

10281102

1029-
def test_langchain_integration_with_langchain_core_only(sentry_init, capture_events):
1030-
"""Test that the langchain integration works when langchain.agents.AgentExecutor
1031-
is not available or langchain is not installed, but langchain-core is.
1032-
"""
1033-
1034-
from langchain_core.outputs import LLMResult, Generation
1035-
1036-
with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
1037-
from sentry_sdk.integrations.langchain import (
1038-
LangchainIntegration,
1039-
SentryLangchainCallback,
1040-
)
1041-
1042-
sentry_init(
1043-
integrations=[LangchainIntegration(include_prompts=True)],
1044-
traces_sample_rate=1.0,
1045-
send_default_pii=True,
1046-
)
1047-
events = capture_events()
1048-
1049-
try:
1050-
LangchainIntegration.setup_once()
1051-
except Exception as e:
1052-
pytest.fail(f"setup_once() failed when AgentExecutor is None: {e}")
1053-
1054-
callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
1055-
1056-
run_id = "12345678-1234-1234-1234-123456789012"
1057-
serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
1058-
prompts = ["What is the capital of France?"]
1059-
1060-
with start_transaction():
1061-
callback.on_llm_start(
1062-
serialized=serialized,
1063-
prompts=prompts,
1064-
run_id=run_id,
1065-
invocation_params={
1066-
"temperature": 0.7,
1067-
"max_tokens": 100,
1068-
"model": "gpt-3.5-turbo",
1069-
},
1070-
)
1071-
1072-
response = LLMResult(
1073-
generations=[[Generation(text="The capital of France is Paris.")]],
1074-
llm_output={
1075-
"token_usage": {
1076-
"total_tokens": 25,
1077-
"prompt_tokens": 10,
1078-
"completion_tokens": 15,
1079-
}
1080-
},
1081-
)
1082-
callback.on_llm_end(response=response, run_id=run_id)
1083-
1084-
assert len(events) > 0
1085-
tx = events[0]
1086-
assert tx["type"] == "transaction"
1087-
1088-
llm_spans = [
1089-
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
1090-
]
1091-
assert len(llm_spans) > 0
1092-
1093-
llm_span = llm_spans[0]
1094-
assert llm_span["description"] == "Langchain LLM call"
1095-
assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
1096-
assert (
1097-
llm_span["data"]["gen_ai.response.text"]
1098-
== "The capital of France is Paris."
1099-
)
1100-
assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25
1101-
assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10
1102-
assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15
1103-
1104-
11051103
def test_langchain_message_role_mapping(sentry_init, capture_events):
11061104
"""Test that message roles are properly normalized in langchain integration."""
11071105
global llm_type

0 commit comments

Comments
 (0)