|
55 | 55 | openai = None |
56 | 56 |
|
57 | 57 |
|
# Optional SDK imports: these packages may be absent in some test
# environments, so fall back to None and let fixtures/tests skip.
try:
    import anthropic
except ImportError:
    anthropic = None


try:
    # Must import the submodule explicitly: `import google` alone only binds
    # the `google` namespace package, so `google.genai` would raise
    # AttributeError later even though the import itself succeeded.
    import google.genai
except ImportError:
    google = None
58 | 70 | from tests import _warning_recorder, _warning_recorder_mgr |
59 | 71 |
|
60 | 72 | from typing import TYPE_CHECKING |
@@ -1097,7 +1109,12 @@ def inner(response_content, serialize_pydantic=False, request_headers=None): |
1097 | 1109 | ) |
1098 | 1110 |
|
1099 | 1111 | if serialize_pydantic: |
1100 | | - response_content = json.dumps(response_content.model_dump()).encode("utf-8") |
| 1112 | + response_content = json.dumps( |
| 1113 | + response_content.model_dump( |
| 1114 | + by_alias=True, |
| 1115 | + exclude_none=True, |
| 1116 | + ) |
| 1117 | + ).encode("utf-8") |
1101 | 1118 |
|
1102 | 1119 | response = HttpxResponse( |
1103 | 1120 | 200, |
@@ -1224,6 +1241,30 @@ def streaming_chat_completions_model_response(): |
1224 | 1241 | ] |
1225 | 1242 |
|
1226 | 1243 |
|
@pytest.fixture
def nonstreaming_chat_completions_model_response():
    """A minimal non-streaming OpenAI ``ChatCompletion`` for instrumentation tests."""
    assistant_message = openai.types.chat.ChatCompletionMessage(
        role="assistant", content="Test response"
    )
    only_choice = openai.types.chat.chat_completion.Choice(
        index=0,
        finish_reason="stop",
        message=assistant_message,
    )
    token_usage = openai.types.CompletionUsage(
        prompt_tokens=10,
        completion_tokens=20,
        total_tokens=30,
    )
    return openai.types.chat.ChatCompletion(
        id="chatcmpl-test",
        choices=[only_choice],
        created=1234567890,
        model="gpt-3.5-turbo",
        object="chat.completion",
        usage=token_usage,
    )
| 1267 | + |
1227 | 1268 | @pytest.fixture |
1228 | 1269 | def nonstreaming_responses_model_response(): |
1229 | 1270 | return openai.types.responses.Response( |
@@ -1263,6 +1304,54 @@ def nonstreaming_responses_model_response(): |
1263 | 1304 | ) |
1264 | 1305 |
|
1265 | 1306 |
|
@pytest.fixture
def nonstreaming_anthropic_model_response():
    """A minimal non-streaming Anthropic ``Message`` for instrumentation tests."""
    text_part = anthropic.types.TextBlock(
        type="text",
        text="Hello, how can I help you?",
    )
    token_usage = anthropic.types.Usage(input_tokens=10, output_tokens=20)
    return anthropic.types.Message(
        id="msg_123",
        type="message",
        role="assistant",
        model="claude-3-opus-20240229",
        content=[text_part],
        stop_reason="end_turn",
        stop_sequence=None,
        usage=token_usage,
    )
| 1327 | + |
| 1328 | + |
@pytest.fixture
def nonstreaming_google_genai_model_response():
    """A minimal non-streaming google-genai ``GenerateContentResponse`` for tests."""
    reply_part = google.genai.types.Part(text="Hello, how can I help you?")
    model_content = google.genai.types.Content(role="model", parts=[reply_part])
    only_candidate = google.genai.types.Candidate(
        content=model_content,
        finish_reason="STOP",
    )
    token_usage = google.genai.types.GenerateContentResponseUsageMetadata(
        prompt_token_count=10,
        candidates_token_count=20,
        total_token_count=30,
    )
    return google.genai.types.GenerateContentResponse(
        response_id="resp_123",
        candidates=[only_candidate],
        model_version="gemini/gemini-pro",
        usage_metadata=token_usage,
    )
| 1353 | + |
| 1354 | + |
1266 | 1355 | @pytest.fixture |
1267 | 1356 | def responses_tool_call_model_responses(): |
1268 | 1357 | def inner( |
|
0 commit comments