|
19 | 19 | import warnings |
20 | 20 |
|
21 | 21 | from google.adk.models.lite_llm import _content_to_message_param |
| 22 | +from google.adk.models.lite_llm import _FINISH_REASON_MAPPING |
22 | 23 | from google.adk.models.lite_llm import _function_declaration_to_tool_param |
23 | 24 | from google.adk.models.lite_llm import _get_content |
24 | 25 | from google.adk.models.lite_llm import _message_to_generate_content_response |
@@ -1938,3 +1939,113 @@ def test_non_gemini_litellm_no_warning(): |
1938 | 1939 | # Test with non-Gemini model |
1939 | 1940 | LiteLlm(model="openai/gpt-4o") |
1940 | 1941 | assert len(w) == 0 |
| 1942 | + |
| 1943 | + |
@pytest.mark.parametrize(
    "finish_reason,response_content,expected_content,has_tool_calls",
    [
        ("length", "Test response", "Test response", False),
        ("stop", "Complete response", "Complete response", False),
        (
            "tool_calls",
            "",
            "",
            True,
        ),
        ("content_filter", "", "", False),
    ],
    ids=["length", "stop", "tool_calls", "content_filter"],
)
@pytest.mark.asyncio
async def test_finish_reason_propagation(
    mock_acompletion,
    lite_llm_instance,
    finish_reason,
    response_content,
    expected_content,
    has_tool_calls,
):
  """Test that finish_reason is properly propagated from LiteLLM response.

  For each LiteLLM finish_reason string, verifies that the responses yielded
  by generate_content_async carry the corresponding types.FinishReason enum
  value (per _FINISH_REASON_MAPPING), and that content / tool calls survive
  the conversion.
  """
  tool_calls = None
  if has_tool_calls:
    tool_calls = [
        ChatCompletionMessageToolCall(
            type="function",
            id="test_id",
            function=Function(
                name="test_function",
                arguments='{"arg": "value"}',
            ),
        )
    ]

  mock_response = ModelResponse(
      choices=[
          Choices(
              message=ChatCompletionAssistantMessage(
                  role="assistant",
                  content=response_content,
                  tool_calls=tool_calls,
              ),
              finish_reason=finish_reason,
          )
      ]
  )
  mock_acompletion.return_value = mock_response

  llm_request = LlmRequest(
      contents=[
          types.Content(
              role="user", parts=[types.Part.from_text(text="Test prompt")]
          )
      ],
  )

  responses = [
      response
      async for response in lite_llm_instance.generate_content_async(
          llm_request
      )
  ]
  # Guard against a vacuous pass: if the generator yielded nothing, the
  # per-response assertions below would never execute.
  assert responses

  for response in responses:
    assert response.content.role == "model"
    # Verify finish_reason is mapped to FinishReason enum
    assert isinstance(response.finish_reason, types.FinishReason)
    # Verify correct enum mapping using the actual mapping from lite_llm
    assert response.finish_reason == _FINISH_REASON_MAPPING[finish_reason]
    if expected_content:
      assert response.content.parts[0].text == expected_content
    if has_tool_calls:
      assert len(response.content.parts) > 0
      assert response.content.parts[-1].function_call.name == "test_function"

  mock_acompletion.assert_called_once()
| 2017 | + |
| 2018 | + |
@pytest.mark.asyncio
async def test_finish_reason_unknown_maps_to_other(
    mock_acompletion, lite_llm_instance
):
  """Test that unknown finish_reason values map to FinishReason.OTHER.

  A finish_reason string not present in _FINISH_REASON_MAPPING must fall
  back to types.FinishReason.OTHER rather than raising or passing through
  the raw string.
  """
  mock_response = ModelResponse(
      choices=[
          Choices(
              message=ChatCompletionAssistantMessage(
                  role="assistant",
                  content="Test response",
              ),
              finish_reason="unknown_reason_type",
          )
      ]
  )
  mock_acompletion.return_value = mock_response

  llm_request = LlmRequest(
      contents=[
          types.Content(
              role="user", parts=[types.Part.from_text(text="Test prompt")]
          )
      ],
  )

  responses = [
      response
      async for response in lite_llm_instance.generate_content_async(
          llm_request
      )
  ]
  # Guard against a vacuous pass: the assertions below only run if the
  # generator actually yielded at least one response.
  assert responses

  for response in responses:
    assert response.content.role == "model"
    # Unknown finish_reason should map to OTHER
    assert isinstance(response.finish_reason, types.FinishReason)
    assert response.finish_reason == types.FinishReason.OTHER

  mock_acompletion.assert_called_once()
0 commit comments