Skip to content

Commit 66ef2e6

Browse files
test(litellm): Replace mocks with httpx types in nonstreaming completion() tests (#5937)
Replace mocks with `httpx` types to avoid test failures when library internals change.
1 parent 96ebbf6 commit 66ef2e6

File tree

6 files changed

+449
-196
lines changed

6 files changed

+449
-196
lines changed

scripts/populate_tox/config.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -237,6 +237,9 @@
237237
},
238238
"litellm": {
239239
"package": "litellm",
240+
"deps": {
241+
"*": ["anthropic", "google-genai"],
242+
},
240243
},
241244
"litestar": {
242245
"package": "litestar",

scripts/populate_tox/package_dependencies.jsonl

Lines changed: 31 additions & 31 deletions
Large diffs are not rendered by default.

scripts/populate_tox/releases.jsonl

Lines changed: 1 addition & 3 deletions
Large diffs are not rendered by default.

tests/conftest.py

Lines changed: 90 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,18 @@
5555
openai = None
5656

5757

58+
# Optional test dependency: bind ``anthropic`` to None when the SDK is not
# installed so importing this conftest never fails in environments without it.
# NOTE(review): fixtures referencing ``anthropic.types`` will raise
# AttributeError on None in that case — presumably those tests are only
# collected/run when the SDK is present; confirm against the test matrix.
try:
    import anthropic
except ImportError:
    anthropic = None


# Same pattern for the google-genai SDK (imported via the ``google`` namespace
# package); fixtures below use ``google.genai.types``.
try:
    import google
except ImportError:
    google = None
68+
69+
5870
from tests import _warning_recorder, _warning_recorder_mgr
5971

6072
from typing import TYPE_CHECKING
@@ -1097,7 +1109,12 @@ def inner(response_content, serialize_pydantic=False, request_headers=None):
10971109
)
10981110

10991111
if serialize_pydantic:
1100-
response_content = json.dumps(response_content.model_dump()).encode("utf-8")
1112+
response_content = json.dumps(
1113+
response_content.model_dump(
1114+
by_alias=True,
1115+
exclude_none=True,
1116+
)
1117+
).encode("utf-8")
11011118

11021119
response = HttpxResponse(
11031120
200,
@@ -1224,6 +1241,30 @@ def streaming_chat_completions_model_response():
12241241
]
12251242

12261243

1244+
@pytest.fixture
def nonstreaming_chat_completions_model_response():
    """Canned non-streaming OpenAI ``ChatCompletion`` for mocking completion calls.

    Built from real ``openai.types`` objects (rather than mocks) so the payload
    stays valid even when library internals change.
    """
    reply = openai.types.chat.ChatCompletionMessage(
        role="assistant", content="Test response"
    )
    only_choice = openai.types.chat.chat_completion.Choice(
        index=0,
        finish_reason="stop",
        message=reply,
    )
    token_usage = openai.types.CompletionUsage(
        prompt_tokens=10,
        completion_tokens=20,
        total_tokens=30,
    )
    return openai.types.chat.ChatCompletion(
        id="chatcmpl-test",
        choices=[only_choice],
        created=1234567890,
        model="gpt-3.5-turbo",
        object="chat.completion",
        usage=token_usage,
    )
1266+
1267+
12271268
@pytest.fixture
12281269
def nonstreaming_responses_model_response():
12291270
return openai.types.responses.Response(
@@ -1263,6 +1304,54 @@ def nonstreaming_responses_model_response():
12631304
)
12641305

12651306

1307+
@pytest.fixture
def nonstreaming_anthropic_model_response():
    """Canned non-streaming Anthropic ``Message`` response for mocking.

    Uses real ``anthropic.types`` objects so the fixture tracks the SDK's
    own schema instead of hand-rolled mocks.
    """
    text_block = anthropic.types.TextBlock(
        type="text",
        text="Hello, how can I help you?",
    )
    token_usage = anthropic.types.Usage(
        input_tokens=10,
        output_tokens=20,
    )
    return anthropic.types.Message(
        id="msg_123",
        type="message",
        role="assistant",
        model="claude-3-opus-20240229",
        content=[text_block],
        stop_reason="end_turn",
        stop_sequence=None,
        usage=token_usage,
    )
1327+
1328+
1329+
@pytest.fixture
def nonstreaming_google_genai_model_response():
    """Canned non-streaming google-genai ``GenerateContentResponse`` for mocking.

    Assembled from real ``google.genai.types`` objects so the shape stays in
    sync with the SDK rather than relying on mocks.
    """
    model_reply = google.genai.types.Content(
        role="model",
        parts=[
            google.genai.types.Part(
                text="Hello, how can I help you?",
            )
        ],
    )
    only_candidate = google.genai.types.Candidate(
        content=model_reply,
        finish_reason="STOP",
    )
    token_usage = google.genai.types.GenerateContentResponseUsageMetadata(
        prompt_token_count=10,
        candidates_token_count=20,
        total_token_count=30,
    )
    return google.genai.types.GenerateContentResponse(
        response_id="resp_123",
        candidates=[only_candidate],
        model_version="gemini/gemini-pro",
        usage_metadata=token_usage,
    )
1353+
1354+
12661355
@pytest.fixture
12671356
def responses_tool_call_model_responses():
12681357
def inner(

0 commit comments

Comments
 (0)