Skip to content

Commit 8434bee

Browse files
committed
feat(agenthub): add agenthub llm
1 parent 267ce0d commit 8434bee

7 files changed

Lines changed: 113 additions & 189 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.0.74"
3+
version = "2.0.75"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.10"

src/uipath/_services/llm_gateway_service.py

Lines changed: 18 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,9 @@
1010
TextEmbedding,
1111
ToolChoice,
1212
ToolDefinition,
13-
UsageInfo,
1413
)
1514
from ..tracing._traced import traced
15+
from ..utils import EndpointManager
1616
from ._base_service import BaseService
1717

1818
# Common constants
@@ -54,36 +54,12 @@ class UiPathOpenAIService(BaseService):
5454
def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
5555
super().__init__(config=config, execution_context=execution_context)
5656

57-
@traced(name="llm_embeddings_usage", run_type="uipath")
58-
async def embeddings_usage(
59-
self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
60-
):
61-
"""Embedd the input text using llm gateway service.
62-
63-
Args:
64-
input (str): The input text to embedd.
65-
embedding_model (str, optional): The embedding model to use. Defaults to text-embedding-ada-002.
66-
67-
Returns:
68-
EmbeddingUsageInfo: The embedding usage information.
69-
"""
70-
endpoint = Endpoint(
71-
f"/llmgateway_/openai/deployments/{embedding_model}/embeddings/usage"
72-
)
73-
74-
response = await self.request_async(
75-
"POST",
76-
endpoint,
77-
content=json.dumps({"input": input}),
78-
params={"api-version": API_VERSION},
79-
headers=DEFAULT_LLM_HEADERS,
80-
)
81-
82-
return UsageInfo.model_validate(response.json())
83-
8457
@traced(name="llm_embeddings", run_type="uipath")
8558
async def embeddings(
86-
self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
59+
self,
60+
input: str,
61+
embedding_model: str = EmbeddingModels.text_embedding_ada_002,
62+
openai_api_version: str = API_VERSION,
8763
):
8864
"""Embed the input text using llm gateway service.
8965
@@ -93,9 +69,10 @@ async def embeddings(
9369
Returns:
9470
TextEmbedding: The embedding response.
9571
"""
96-
endpoint = Endpoint(
97-
f"/llmgateway_/openai/deployments/{embedding_model}/embeddings"
72+
endpoint = EndpointManager.get_embeddings_endpoint().format(
73+
model=embedding_model, api_version=openai_api_version
9874
)
75+
endpoint = Endpoint("/" + endpoint)
9976

10077
response = await self.request_async(
10178
"POST",
@@ -114,6 +91,7 @@ async def chat_completions(
11491
model: str = ChatModels.gpt_4o_mini_2024_07_18,
11592
max_tokens: int = 50,
11693
temperature: float = 0,
94+
api_version: str = API_VERSION,
11795
):
11896
"""Get chat completions using llm gateway service.
11997
@@ -139,59 +117,10 @@ async def chat_completions(
139117
Returns:
140118
ChatCompletion: The chat completion response.
141119
"""
142-
endpoint = Endpoint(f"/llmgateway_/openai/deployments/{model}/chat/completions")
143-
144-
request_body = {
145-
"messages": messages,
146-
"max_tokens": max_tokens,
147-
"temperature": temperature,
148-
}
149-
150-
response = await self.request_async(
151-
"POST",
152-
endpoint,
153-
content=json.dumps(request_body),
154-
params={"api-version": API_VERSION},
155-
headers=DEFAULT_LLM_HEADERS,
156-
)
157-
158-
return ChatCompletion.model_validate(response.json())
159-
160-
@traced(name="llm_chat_completions_usage", run_type="uipath")
161-
async def chat_completions_usage(
162-
self,
163-
messages: List[Dict[str, str]],
164-
model: str = ChatModels.gpt_4o_mini_2024_07_18,
165-
max_tokens: int = 50,
166-
temperature: float = 0,
167-
):
168-
"""Get chat completions usage using llm gateway service.
169-
170-
Args:
171-
messages (List[Dict[str, str]]): List of message dictionaries with 'role' and 'content' keys.
172-
The supported roles are 'system', 'user', and 'assistant'.
173-
174-
Example:
175-
```
176-
[
177-
{"role": "system", "content": "You are a helpful Python programming assistant."},
178-
{"role": "user", "content": "How do I read a file in Python?"},
179-
{"role": "assistant", "content": "You can use the built-in open() function."},
180-
{"role": "user", "content": "Can you show an example?"}
181-
]
182-
```
183-
The conversation history can be included to provide context to the model.
184-
model (str, optional): The model to use for chat completion. Defaults to ChatModels.gpt_4o_mini_2024_07_18.
185-
max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 50.
186-
temperature (float, optional): Temperature for sampling, between 0 and 1.
187-
Lower values make output more deterministic. Defaults to 0.
188-
189-
Returns:
190-
ChatCompletion: The chat completion usage response.
191-
"""
192-
endpoint = Endpoint(
193-
f"/llmgateway_/openai/deployments/{model}/chat/completions/usage"
120+
endpoint = EndpointManager.get_passthrough_endpoint().format(
121+
model=model, api_version=api_version
194122
)
123+
endpoint = Endpoint("/" + endpoint)
195124

196125
request_body = {
197126
"messages": messages,
@@ -207,7 +136,7 @@ async def chat_completions_usage(
207136
headers=DEFAULT_LLM_HEADERS,
208137
)
209138

210-
return UsageInfo.model_validate(response.json())
139+
return ChatCompletion.model_validate(response.json())
211140

212141

213142
class UiPathLlmChatService(BaseService):
@@ -229,6 +158,7 @@ async def chat_completions(
229158
top_p: float = 1,
230159
tools: Optional[List[ToolDefinition]] = None,
231160
tool_choice: Optional[ToolChoice] = None,
161+
api_version: str = NORMALIZED_API_VERSION,
232162
):
233163
"""Get chat completions using UiPath's normalized LLM Gateway API.
234164
@@ -250,7 +180,10 @@ async def chat_completions(
250180
Returns:
251181
ChatCompletion: The chat completion response.
252182
"""
253-
endpoint = Endpoint("/llmgateway_/api/chat/completions")
183+
endpoint = EndpointManager.get_normalized_endpoint().format(
184+
model=model, api_version=api_version
185+
)
186+
endpoint = Endpoint("/" + endpoint)
254187

255188
request_body = {
256189
"messages": messages,

src/uipath/models/llm_gateway.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,11 +21,6 @@ class TextEmbedding(BaseModel):
2121
usage: EmbeddingUsage
2222

2323

24-
class UsageInfo(BaseModel):
25-
encoding: str
26-
prompt_tokens: int
27-
28-
2924
class ToolCall(BaseModel):
3025
id: str
3126
name: str

src/uipath/utils/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
from ._endpoints_manager import EndpointManager # noqa: D104
2+
3+
__all__ = [
4+
"EndpointManager",
5+
]
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
import logging
import os
from enum import Enum
from typing import Optional

logger = logging.getLogger(__name__)


class UiPathEndpoints(Enum):
    """URL templates for the UiPath LLM endpoints.

    Templates may contain ``{model}`` and ``{api_version}`` placeholders to be
    filled in by callers via ``str.format``.
    """

    # AgentHub-hosted endpoints, preferred when AgentHub is reachable.
    AH_NORMALIZED_COMPLETION_ENDPOINT = "agenthub_/llm/api/chat/completions"
    AH_PASSTHROUGH_COMPLETION_ENDPOINT = "agenthub_/llm/openai/deployments/{model}/chat/completions?api-version={api_version}"
    AH_EMBEDDING_ENDPOINT = (
        "agenthub_/llm/openai/deployments/{model}/embeddings?api-version={api_version}"
    )
    AH_CAPABILITIES_ENDPOINT = "agenthub_/llm/api/capabilities"

    # Legacy LLM-gateway endpoints, used as a fallback.
    NORMALIZED_COMPLETION_ENDPOINT = "llmgateway_/api/chat/completions"
    PASSTHROUGH_COMPLETION_ENDPOINT = "llmgateway_/openai/deployments/{model}/chat/completions?api-version={api_version}"
    EMBEDDING_ENDPOINT = (
        "llmgateway_/openai/deployments/{model}/embeddings?api-version={api_version}"
    )


class EndpointManager:
    """Manages and caches the UiPath endpoints.

    This class provides functionality to determine which UiPath endpoints to use
    based on the availability of AgentHub. It checks for AgentHub capabilities
    and caches the result to avoid repeated network calls.

    Class Attributes:
        _base_url (str): The base URL for UiPath services, retrieved from the
            UIPATH_URL environment variable at import time.
        _agenthub_available (Optional[bool]): Cached result of the AgentHub
            availability check; ``None`` means "not checked yet".

    Methods:
        is_agenthub_available(): Checks if AgentHub is available, caching the result.
        get_passthrough_endpoint(): Returns the appropriate passthrough completion endpoint.
        get_normalized_endpoint(): Returns the appropriate normalized completion endpoint.
        get_embeddings_endpoint(): Returns the appropriate embeddings endpoint.

    All endpoint methods automatically select between AgentHub and standard
    endpoints based on availability.
    """

    _base_url: str = os.getenv("UIPATH_URL", "")
    _agenthub_available: Optional[bool] = None

    @classmethod
    def is_agenthub_available(cls) -> bool:
        """Check if AgentHub is available and cache the result."""
        if cls._agenthub_available is None:
            cls._agenthub_available = cls._check_agenthub()
        return cls._agenthub_available

    @classmethod
    def _check_agenthub(cls) -> bool:
        """Probe the AgentHub capabilities endpoint; return True on HTTP 200.

        Any network/client error is logged and treated as "not available" so
        callers fall back to the legacy LLM-gateway endpoints.
        """
        try:
            # Imported lazily so merely importing this module does not require
            # httpx; it is only needed when the probe actually runs.
            import httpx

            # Prefer the cached base URL; fall back to the environment in case
            # UIPATH_URL was set after this module was imported.
            base_url = cls._base_url or os.getenv("UIPATH_URL", "")
            capabilities_url = f"{base_url.rstrip('/')}/{UiPathEndpoints.AH_CAPABILITIES_ENDPOINT.value}"
            logger.debug("Checking AgentHub capabilities at %s", capabilities_url)
            with httpx.Client() as http_client:
                response = http_client.get(capabilities_url)
            return response.status_code == 200
        except Exception as e:
            logger.error(f"Error checking AgentHub capabilities: {e}", exc_info=True)
            return False

    @classmethod
    def get_passthrough_endpoint(cls) -> str:
        """Return the passthrough chat-completions endpoint template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_PASSTHROUGH_COMPLETION_ENDPOINT.value
        return UiPathEndpoints.PASSTHROUGH_COMPLETION_ENDPOINT.value

    @classmethod
    def get_normalized_endpoint(cls) -> str:
        """Return the normalized chat-completions endpoint template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_NORMALIZED_COMPLETION_ENDPOINT.value
        return UiPathEndpoints.NORMALIZED_COMPLETION_ENDPOINT.value

    @classmethod
    def get_embeddings_endpoint(cls) -> str:
        """Return the embeddings endpoint template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_EMBEDDING_ENDPOINT.value
        return UiPathEndpoints.EMBEDDING_ENDPOINT.value

tests/sdk/services/test_llm_integration.py

Lines changed: 0 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -74,20 +74,6 @@ async def test_embeddings_real(self, llm_service):
7474
assert hasattr(result, "usage")
7575
assert result.usage.prompt_tokens > 0
7676

77-
@pytest.mark.asyncio
78-
async def test_embeddings_usage_real(self, llm_service):
79-
"""Test the embeddings_usage function with a real API call."""
80-
input_text = "Testing the embedding usage endpoint."
81-
82-
# Make the actual API call
83-
result = await llm_service.embeddings_usage(input=input_text)
84-
85-
# Validate the response
86-
assert result is not None
87-
assert hasattr(result, "encoding")
88-
assert hasattr(result, "prompt_tokens")
89-
assert result.prompt_tokens > 0
90-
9177
@pytest.mark.asyncio
9278
async def test_chat_completions_real(self, llm_service):
9379
"""Test the chat_completions function with a real API call."""
@@ -115,31 +101,6 @@ async def test_chat_completions_real(self, llm_service):
115101
assert hasattr(result, "usage")
116102
assert result.usage.prompt_tokens > 0
117103

118-
@pytest.mark.asyncio
119-
async def test_chat_completions_usage_real(self, llm_service):
120-
"""Test the chat_completions_usage function with a real API call."""
121-
messages = [
122-
{"role": "system", "content": "You are a helpful assistant."},
123-
{"role": "user", "content": "What is the capital of France?"},
124-
]
125-
126-
# Make the actual API call
127-
result = await llm_service.chat_completions_usage(
128-
messages=messages,
129-
model=ChatModels.gpt_4o_mini_2024_07_18,
130-
max_tokens=50,
131-
temperature=0.7,
132-
)
133-
134-
# Validate the response
135-
assert result is not None
136-
assert hasattr(result, "encoding")
137-
assert hasattr(result, "prompt_tokens")
138-
assert result.prompt_tokens > 0
139-
assert isinstance(result.prompt_tokens, int)
140-
assert isinstance(result.encoding, str)
141-
assert len(result.encoding) > 0
142-
143104
@pytest.mark.asyncio
144105
async def test_embeddings_with_custom_model_real(self, llm_service):
145106
"""Test the embeddings function with a custom model."""

0 commit comments

Comments
 (0)