Skip to content

Commit 7b4c776

Browse files
committed
fix: default error_on_no_tool_call to false in llm wrappers
1 parent 1e0dfb2 commit 7b4c776

4 files changed

Lines changed: 38 additions & 4 deletions

File tree

packages/uipath-llamaindex/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath-llamaindex"
3-
version = "0.5.9"
3+
version = "0.5.10"
44
description = "Python SDK that enables developers to build and deploy LlamaIndex agents to the UiPath Cloud Platform"
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.11"

packages/uipath-llamaindex/src/uipath_llamaindex/llms/_openai.py

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
import os
2-
from typing import Any
2+
from typing import Any, List
33

44
import httpx
5+
from llama_index.core.base.llms.types import ChatResponse
6+
from llama_index.core.tools import ToolSelection
57
from llama_index.llms.azure_openai import AzureOpenAI # type: ignore
68
from uipath._utils._ssl_context import get_httpx_client_kwargs
79
from uipath.utils import EndpointManager
@@ -87,3 +89,13 @@ def __init__(
8789
}
8890
final_kwargs = {**defaults, **kwargs}
8991
super().__init__(**final_kwargs)
92+
93+
def get_tool_calls_from_response(
    self,
    response: ChatResponse,
    error_on_no_tool_call: bool = False,
    **kwargs: Any,
) -> List[ToolSelection]:
    """Extract tool-call selections from *response* via the parent class.

    Thin override whose sole purpose is to default ``error_on_no_tool_call``
    to ``False`` (per this commit's intent, the upstream base presumably
    raises by default when no tool call is present — confirm against the
    llama_index base class), so a tool-less response yields an empty list.
    """
    # Forward everything explicitly; the flag is passed by keyword so the
    # caller-supplied value (or our False default) always wins.
    selections = super().get_tool_calls_from_response(
        response,
        error_on_no_tool_call=error_on_no_tool_call,
        **kwargs,
    )
    return selections

packages/uipath-llamaindex/src/uipath_llamaindex/llms/bedrock.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import logging
22
import os
3-
from typing import Any, Optional, Sequence
3+
from typing import Any, List, Optional, Sequence
44

55
from uipath.utils import EndpointManager
66

@@ -49,6 +49,7 @@ def _check_bedrock_dependencies() -> None:
4949
CompletionResponse,
5050
CompletionResponseAsyncGen,
5151
)
52+
from llama_index.core.tools import ToolSelection # noqa: E402
5253
from llama_index.core.llms.callbacks import ( # noqa: E402
5354
llm_chat_callback,
5455
llm_completion_callback,
@@ -184,6 +185,16 @@ def __init__(
184185
**kwargs,
185186
)
186187

188+
def get_tool_calls_from_response(
    self,
    response: ChatResponse,
    error_on_no_tool_call: bool = False,
    **kwargs: Any,
) -> List[ToolSelection]:
    """Return the tool calls parsed out of *response*.

    Delegates to the parent implementation unchanged except that
    ``error_on_no_tool_call`` defaults to ``False`` here, so a response
    without any tool call is treated as "no selections" rather than an
    error (matches the commit's stated fix for the Bedrock wrapper).
    """
    # Keyword-forwarding keeps any explicit caller override intact.
    parsed_calls = super().get_tool_calls_from_response(
        response,
        error_on_no_tool_call=error_on_no_tool_call,
        **kwargs,
    )
    return parsed_calls
197+
187198

188199
class UiPathChatBedrock(Bedrock):
189200
def __init__(

packages/uipath-llamaindex/src/uipath_llamaindex/llms/vertex.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import logging
22
import os
3-
from typing import Any, Generator, Optional, Sequence
3+
from typing import Any, Generator, List, Optional, Sequence
44

55
import httpx
66
from llama_index.core.callbacks import CallbackManager
@@ -47,6 +47,7 @@ def _check_vertex_dependencies() -> None:
4747
CompletionResponseAsyncGen,
4848
CompletionResponseGen,
4949
)
50+
from llama_index.core.tools import ToolSelection # noqa: E402
5051
from llama_index.core.bridge.pydantic import PrivateAttr # noqa: E402
5152
from llama_index.core.llms.callbacks import ( # noqa: E402
5253
llm_chat_callback,
@@ -410,3 +411,13 @@ async def gen() -> ChatResponseAsyncGen:
410411
)
411412

412413
return gen()
414+
415+
def get_tool_calls_from_response(
    self,
    response: ChatResponse,
    error_on_no_tool_call: bool = False,
    **kwargs: Any,
) -> List[ToolSelection]:
    """Parse tool selections from a chat *response*.

    Identical to the parent's extraction logic; this override only flips
    the default of ``error_on_no_tool_call`` to ``False`` for the Vertex
    wrapper so that a response carrying no tool call does not raise.
    """
    # Pass the flag through by keyword; **kwargs carries any extras along.
    return super().get_tool_calls_from_response(
        response,
        error_on_no_tool_call=error_on_no_tool_call,
        **kwargs,
    )

0 commit comments

Comments (0)