Skip to content

Commit 9590ca7

Browse files
committed
Update litellm version for vulnerability remediation
1 parent 3e85f64 commit 9590ca7

3 files changed

Lines changed: 16 additions & 7 deletions

File tree

setup.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -181,8 +181,8 @@
     "jsonschema",
     "ruamel.yaml",
     "pyyaml",
-    "litellm>=1.75.5, <=1.82.6",
-    # For LiteLLM tests. Upper bound pinned: versions 1.82.7+ compromised in supply chain attack.
+    "litellm>=1.83.0, <2",
+    # For LiteLLM tests. Versions >=1.82.7,<1.83.0 compromised in supply chain attack.
 ]

 langchain_extra_require = [

tests/unit/vertexai/genai/test_evals.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3454,7 +3454,7 @@ def test_run_inference_with_litellm_string_prompt_format(
         ) as mock_litellm, mock.patch(
             "vertexai._genai._evals_common._call_litellm_completion"
         ) as mock_call_litellm_completion:
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None , None)
             prompt_df = pd.DataFrame([{"prompt": "What is LiteLLM?"}])
             expected_messages = [{"role": "user", "content": "What is LiteLLM?"}]
34603460

@@ -3510,7 +3510,7 @@ def test_run_inference_with_litellm_openai_request_format(
         ) as mock_litellm, mock.patch(
             "vertexai._genai._evals_common._call_litellm_completion"
         ) as mock_call_litellm_completion:
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None , None)
             prompt_df = pd.DataFrame(
                 [
                     {
@@ -3579,7 +3579,9 @@ def test_run_inference_with_unsupported_model_string(
         with mock.patch(
             "vertexai._genai._evals_common.litellm"
         ) as mock_litellm_package:
-            mock_litellm_package.utils.get_valid_models.return_value = []
+            mock_litellm_package.get_llm_provider.side_effect = ValueError(
+                "unsupported model"
+            )
             evals_module = evals.Evals(api_client_=mock_api_client_fixture)
             prompt_df = pd.DataFrame([{"prompt": "test"}])

@@ -3646,7 +3648,7 @@ def test_run_inference_with_litellm_parsing(
         # fmt: off
         with mock.patch("vertexai._genai._evals_common.litellm") as mock_litellm:
         # fmt: on
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None , None)
             inference_result = self.client.evals.run_inference(
                 model="gpt-4o",
                 src=mock_df,

vertexai/_genai/_evals_common.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -761,7 +761,14 @@ def _is_litellm_vertex_maas_model(model: str) -> bool:

 def _is_litellm_model(model: str) -> bool:
     """Checks if the model name corresponds to a valid LiteLLM model name."""
-    return model in litellm.utils.get_valid_models(model)
+    if litellm is None:
+        return False
+
+    try:
+        litellm.get_llm_provider(model)
+        return True
+    except ValueError:
+        return False


 def _is_gemini_model(model: str) -> bool:

0 commit comments

Comments (0)