@@ -3454,7 +3454,7 @@ def test_run_inference_with_litellm_string_prompt_format(
         ) as mock_litellm, mock.patch(
             "vertexai._genai._evals_common._call_litellm_completion"
         ) as mock_call_litellm_completion:
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None, None)
             prompt_df = pd.DataFrame([{"prompt": "What is LiteLLM?"}])
             expected_messages = [{"role": "user", "content": "What is LiteLLM?"}]
 
@@ -3510,7 +3510,7 @@ def test_run_inference_with_litellm_openai_request_format(
         ) as mock_litellm, mock.patch(
             "vertexai._genai._evals_common._call_litellm_completion"
         ) as mock_call_litellm_completion:
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None, None)
             prompt_df = pd.DataFrame(
                 [
                     {
@@ -3579,7 +3579,9 @@ def test_run_inference_with_unsupported_model_string(
         with mock.patch(
             "vertexai._genai._evals_common.litellm"
         ) as mock_litellm_package:
-            mock_litellm_package.utils.get_valid_models.return_value = []
+            mock_litellm_package.get_llm_provider.side_effect = ValueError(
+                "unsupported model"
+            )
             evals_module = evals.Evals(api_client_=mock_api_client_fixture)
             prompt_df = pd.DataFrame([{"prompt": "test"}])
 
@@ -3646,7 +3648,7 @@ def test_run_inference_with_litellm_parsing(
         # fmt: off
         with mock.patch("vertexai._genai._evals_common.litellm") as mock_litellm:
             # fmt: on
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None, None)
             inference_result = self.client.evals.run_inference(
                 model="gpt-4o",
                 src=mock_df,
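
These mock updates track an apparent change in `vertexai._genai._evals_common`: model-string validation now resolves the provider via `litellm.get_llm_provider` (which returns a `(model, provider, dynamic_api_key, api_base)` tuple and raises when the model string cannot be resolved) rather than checking membership in `litellm.utils.get_valid_models()`. A minimal sketch of the validation pattern these mocks stand in for, using a hypothetical helper name that is not part of the vertexai source:

```python
# Hypothetical helper (not the actual vertexai implementation) mirroring
# the provider-resolution check the mocks above simulate.
import litellm


def _is_litellm_model_supported(model: str) -> bool:
    """Return True if LiteLLM can map the model string to a provider."""
    try:
        # get_llm_provider returns (model, provider, dynamic_api_key, api_base)
        # and raises for unresolvable model strings, e.g. the ValueError the
        # unsupported-model test simulates above.
        _, provider, _, _ = litellm.get_llm_provider(model=model)
        return provider is not None
    except Exception:
        return False
```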