@@ -1905,3 +1905,153 @@ async def test_streaming_redacted_thinking_block_preserved_in_final():
19051905
19061906 text_part = final .content .parts [1 ]
19071907 assert text_part .text == "Done."
1908+
1909+
1910+
1911+
1912+ # --- Tests for finish_reason ---
1913+
1914+
@pytest.mark.parametrize(
    ("stop_reason", "expected"),
    [
        ("end_turn", types.FinishReason.STOP),
        ("stop_sequence", types.FinishReason.STOP),
        ("tool_use", types.FinishReason.STOP),
        ("max_tokens", types.FinishReason.MAX_TOKENS),
        ("pause_turn", types.FinishReason.STOP),
        ("refusal", types.FinishReason.SAFETY),
        (None, types.FinishReason.FINISH_REASON_UNSPECIFIED),
    ],
)
def test_to_google_genai_finish_reason(stop_reason, expected):
  """Each Anthropic stop_reason value maps onto the matching ADK FinishReason."""
  from google.adk.models.anthropic_llm import to_google_genai_finish_reason

  mapped = to_google_genai_finish_reason(stop_reason)
  assert mapped == expected
1932+
1933+
@pytest.mark.asyncio
async def test_non_streaming_sets_finish_reason():
  """finish_reason is populated on non-streaming LlmResponse."""
  llm = AnthropicLlm(model="claude-sonnet-4-20250514")

  # Canned Anthropic response whose stop_reason should surface as STOP.
  usage = anthropic_types.Usage(
      input_tokens=5,
      output_tokens=2,
      cache_creation_input_tokens=0,
      cache_read_input_tokens=0,
      server_tool_use=None,
      service_tier=None,
  )
  message = anthropic_types.Message(
      id="msg_test",
      content=[
          anthropic_types.TextBlock(text="Hi", type="text", citations=None)
      ],
      model="claude-sonnet-4-20250514",
      role="assistant",
      stop_reason="end_turn",
      stop_sequence=None,
      type="message",
      usage=usage,
  )

  client = MagicMock()
  client.messages.create = AsyncMock(return_value=message)

  request = LlmRequest(
      model="claude-sonnet-4-20250514",
      contents=[Content(role="user", parts=[Part.from_text(text="Hi")])],
      config=types.GenerateContentConfig(system_instruction="Test"),
  )

  with mock.patch.object(llm, "_anthropic_client", client):
    responses = [
        r async for r in llm.generate_content_async(request, stream=False)
    ]

  assert len(responses) == 1
  assert responses[0].finish_reason == types.FinishReason.STOP
1971+
1972+
@pytest.mark.asyncio
async def test_non_streaming_finish_reason_max_tokens():
  """finish_reason MAX_TOKENS is set when stop_reason is max_tokens."""
  llm = AnthropicLlm(model="claude-sonnet-4-20250514")
  # Canned Anthropic response whose stop_reason should surface as MAX_TOKENS.
  mock_message = anthropic_types.Message(
      id="msg_test",
      content=[
          anthropic_types.TextBlock(text="Hi", type="text", citations=None)
      ],
      model="claude-sonnet-4-20250514",
      role="assistant",
      stop_reason="max_tokens",
      stop_sequence=None,
      type="message",
      usage=anthropic_types.Usage(
          input_tokens=5,
          output_tokens=2,
          cache_creation_input_tokens=0,
          cache_read_input_tokens=0,
          server_tool_use=None,
          service_tier=None,
      ),
  )
  mock_client = MagicMock()
  mock_client.messages.create = AsyncMock(return_value=mock_message)

  llm_request = LlmRequest(
      model="claude-sonnet-4-20250514",
      contents=[Content(role="user", parts=[Part.from_text(text="Hi")])],
      config=types.GenerateContentConfig(system_instruction="Test"),
  )

  with mock.patch.object(llm, "_anthropic_client", mock_client):
    responses = [
        r async for r in llm.generate_content_async(llm_request, stream=False)
    ]

  # Guard the response count before indexing, so an unexpected extra/missing
  # response fails with a clear assertion rather than an IndexError.
  assert len(responses) == 1
  assert responses[0].finish_reason == types.FinishReason.MAX_TOKENS
2009+
2010+
@pytest.mark.asyncio
async def test_streaming_sets_finish_reason():
  """finish_reason is populated on the final streaming LlmResponse."""
  llm = AnthropicLlm(model="claude-sonnet-4-20250514")

  # Minimal Anthropic streaming event sequence: message_start -> one text
  # content block -> message_delta carrying the stop_reason -> message_stop.
  events = [
      MagicMock(
          type="message_start",
          message=MagicMock(usage=MagicMock(input_tokens=5, output_tokens=0)),
      ),
      MagicMock(
          type="content_block_start",
          index=0,
          # Pass citations=None explicitly, consistent with every other
          # TextBlock construction in this file (required by some anthropic
          # SDK versions).
          content_block=anthropic_types.TextBlock(
              text="", type="text", citations=None
          ),
      ),
      MagicMock(
          type="content_block_delta",
          index=0,
          delta=anthropic_types.TextDelta(text="Hi", type="text_delta"),
      ),
      MagicMock(type="content_block_stop", index=0),
      MagicMock(
          type="message_delta",
          delta=MagicMock(stop_reason="max_tokens"),
          usage=MagicMock(output_tokens=1),
      ),
      MagicMock(type="message_stop"),
  ]

  mock_client = MagicMock()
  mock_client.messages.create = AsyncMock(
      return_value=_make_mock_stream_events(events)
  )

  llm_request = LlmRequest(
      model="claude-sonnet-4-20250514",
      contents=[Content(role="user", parts=[Part.from_text(text="Hi")])],
      config=types.GenerateContentConfig(system_instruction="Test"),
  )

  with mock.patch.object(llm, "_anthropic_client", mock_client):
    responses = [
        r async for r in llm.generate_content_async(llm_request, stream=True)
    ]

  # Guard against an empty stream so a broken generator fails with a clear
  # assertion rather than an IndexError on responses[-1].
  assert responses
  final = responses[-1]
  assert final.finish_reason == types.FinishReason.MAX_TOKENS
0 commit comments