@@ -123,7 +123,7 @@ test = [
   "kubernetes>=29.0.0",  # For GkeCodeExecutor
   "langchain-community>=0.3.17",
   "langgraph>=0.2.60, <0.4.8",  # For LangGraphAgent
-  "litellm>=1.75.5, <1.81.0",  # For LiteLLM tests
+  "litellm>=1.75.5, <1.80.17",  # For LiteLLM tests
   "llama-index-readers-file>=0.4.0",  # For retrieval tests
   "openai>=1.100.2",  # For LiteLLM
   "pytest-asyncio>=0.25.0",
@@ -153,7 +153,7 @@ extensions = [
   "docker>=7.0.0",  # For ContainerCodeExecutor
   "kubernetes>=29.0.0",  # For GkeCodeExecutor
   "langgraph>=0.2.60, <0.4.8",  # For LangGraphAgent
-  "litellm>=1.75.5, <1.81.0",  # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
+  "litellm>=1.75.5, <1.80.17",  # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
   "llama-index-readers-file>=0.4.0",  # For retrieval using LlamaIndex.
   "llama-index-embeddings-google-genai>=0.3.0",  # For files retrieval using LlamaIndex.
   "lxml>=5.3.0",  # For load_web_page tool.
0 commit comments