Commit f5c4118

fix(tests): update model to gemini-3.1-pro-preview and handle empty streaming chunks
Parent: 84e792f

1 file changed: tests/integration/models/test_google_llm.py
Lines changed: 5 additions & 4 deletions
@@ -23,13 +23,13 @@
 
 @pytest.fixture
 def gemini_llm():
-    return Gemini(model="gemini-1.5-flash")
+    return Gemini(model="gemini-3.1-pro-preview")
 
 
 @pytest.fixture
 def llm_request():
     return LlmRequest(
-        model="gemini-1.5-flash",
+        model="gemini-3.1-pro-preview",
         contents=[Content(role="user", parts=[Part.from_text(text="Hello")])],
         config=types.GenerateContentConfig(
             temperature=0.1,

@@ -57,8 +57,9 @@ async def test_generate_content_async_stream(gemini_llm, llm_request):
     text = ""
     for i in range(len(responses) - 1):
         assert responses[i].partial is True
-        assert responses[i].content.parts[0].text
-        text += responses[i].content.parts[0].text
+        assert responses[i].content.parts[0].text or responses[i].content.parts[0].thought_signature
+        if responses[i].content.parts[0].text:
+            text += responses[i].content.parts[0].text
 
     # Last message should be accumulated text
     assert responses[-1].content.parts[0].text == text
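What the new assertions accommodate: with thinking-capable Gemini models, a
partial streaming chunk may carry only a thought_signature in its first part,
with no text. Below is a minimal sketch of the accumulation pattern the test
now asserts, assuming response objects shaped like the test's (each with
.partial and .content.parts); the helper and its name are illustrative, not
part of the commit:

def accumulate_stream_text(responses):
    """Concatenate text across partial chunks, tolerating thought-only parts."""
    text = ""
    for response in responses[:-1]:  # all but the final, aggregated chunk
        part = response.content.parts[0]
        # Each partial chunk must carry visible text or a thought signature;
        # a part with neither would indicate a malformed stream.
        assert part.text or part.thought_signature
        if part.text:  # thought-only chunks contribute no visible text
            text += part.text
    return text

# The final chunk is expected to hold the full accumulated text:
#     assert responses[-1].content.parts[0].text == accumulate_stream_text(responses)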
