Skip to content

Commit 5f6f592

Browse files
committed
feat(agenthub): add agenthub llm
1 parent 55de34c commit 5f6f592

7 files changed

Lines changed: 2143 additions & 1989 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.0.73"
3+
version = "2.0.74"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.10"

src/uipath/_services/llm_gateway_service.py

Lines changed: 18 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,9 @@
1010
TextEmbedding,
1111
ToolChoice,
1212
ToolDefinition,
13-
UsageInfo,
1413
)
1514
from ..tracing._traced import traced
15+
from ..utils import EndpointManager
1616
from ._base_service import BaseService
1717

1818
# Common constants
@@ -54,36 +54,12 @@ class UiPathOpenAIService(BaseService):
5454
def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
5555
super().__init__(config=config, execution_context=execution_context)
5656

57-
@traced(name="llm_embeddings_usage", run_type="uipath")
58-
async def embeddings_usage(
59-
self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
60-
):
61-
"""Embedd the input text using llm gateway service.
62-
63-
Args:
64-
input (str): The input text to embedd.
65-
embedding_model (str, optional): The embedding model to use. Defaults to text-embedding-ada-002.
66-
67-
Returns:
68-
EmbeddingUsageInfo: The embedding usage information.
69-
"""
70-
endpoint = Endpoint(
71-
f"/llmgateway_/openai/deployments/{embedding_model}/embeddings/usage"
72-
)
73-
74-
response = await self.request_async(
75-
"POST",
76-
endpoint,
77-
content=json.dumps({"input": input}),
78-
params={"api-version": API_VERSION},
79-
headers=DEFAULT_LLM_HEADERS,
80-
)
81-
82-
return UsageInfo.model_validate(response.json())
83-
8457
@traced(name="llm_embeddings", run_type="uipath")
8558
async def embeddings(
86-
self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
59+
self,
60+
input: str,
61+
embedding_model: str = EmbeddingModels.text_embedding_ada_002,
62+
openai_api_version: str = API_VERSION,
8763
):
8864
"""Embed the input text using llm gateway service.
8965
@@ -93,9 +69,10 @@ async def embeddings(
9369
Returns:
9470
TextEmbedding: The embedding response.
9571
"""
96-
endpoint = Endpoint(
97-
f"/llmgateway_/openai/deployments/{embedding_model}/embeddings"
72+
endpoint = EndpointManager.get_embeddings_endpoint().format(
73+
model=embedding_model, api_version=openai_api_version
9874
)
75+
endpoint = Endpoint("/" + endpoint)
9976

10077
response = await self.request_async(
10178
"POST",
@@ -114,6 +91,7 @@ async def chat_completions(
11491
model: str = ChatModels.gpt_4o_mini_2024_07_18,
11592
max_tokens: int = 50,
11693
temperature: float = 0,
94+
api_version: str = API_VERSION,
11795
):
11896
"""Get chat completions using llm gateway service.
11997
@@ -139,59 +117,10 @@ async def chat_completions(
139117
Returns:
140118
ChatCompletion: The chat completion response.
141119
"""
142-
endpoint = Endpoint(f"/llmgateway_/openai/deployments/{model}/chat/completions")
143-
144-
request_body = {
145-
"messages": messages,
146-
"max_tokens": max_tokens,
147-
"temperature": temperature,
148-
}
149-
150-
response = await self.request_async(
151-
"POST",
152-
endpoint,
153-
content=json.dumps(request_body),
154-
params={"api-version": API_VERSION},
155-
headers=DEFAULT_LLM_HEADERS,
156-
)
157-
158-
return ChatCompletion.model_validate(response.json())
159-
160-
@traced(name="llm_chat_completions_usage", run_type="uipath")
161-
async def chat_completions_usage(
162-
self,
163-
messages: List[Dict[str, str]],
164-
model: str = ChatModels.gpt_4o_mini_2024_07_18,
165-
max_tokens: int = 50,
166-
temperature: float = 0,
167-
):
168-
"""Get chat completions usage using llm gateway service.
169-
170-
Args:
171-
messages (List[Dict[str, str]]): List of message dictionaries with 'role' and 'content' keys.
172-
The supported roles are 'system', 'user', and 'assistant'.
173-
174-
Example:
175-
```
176-
[
177-
{"role": "system", "content": "You are a helpful Python programming assistant."},
178-
{"role": "user", "content": "How do I read a file in Python?"},
179-
{"role": "assistant", "content": "You can use the built-in open() function."},
180-
{"role": "user", "content": "Can you show an example?"}
181-
]
182-
```
183-
The conversation history can be included to provide context to the model.
184-
model (str, optional): The model to use for chat completion. Defaults to ChatModels.gpt_4o_mini_2024_07_18.
185-
max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 50.
186-
temperature (float, optional): Temperature for sampling, between 0 and 1.
187-
Lower values make output more deterministic. Defaults to 0.
188-
189-
Returns:
190-
ChatCompletion: The chat completion usage response.
191-
"""
192-
endpoint = Endpoint(
193-
f"/llmgateway_/openai/deployments/{model}/chat/completions/usage"
120+
endpoint = EndpointManager.get_passthrough_endpoint().format(
121+
model=model, api_version=api_version
194122
)
123+
endpoint = Endpoint("/" + endpoint)
195124

196125
request_body = {
197126
"messages": messages,
@@ -207,7 +136,7 @@ async def chat_completions_usage(
207136
headers=DEFAULT_LLM_HEADERS,
208137
)
209138

210-
return UsageInfo.model_validate(response.json())
139+
return ChatCompletion.model_validate(response.json())
211140

212141

213142
class UiPathLlmChatService(BaseService):
@@ -229,6 +158,7 @@ async def chat_completions(
229158
top_p: float = 1,
230159
tools: Optional[List[ToolDefinition]] = None,
231160
tool_choice: Optional[ToolChoice] = None,
161+
api_version: str = NORMALIZED_API_VERSION,
232162
):
233163
"""Get chat completions using UiPath's normalized LLM Gateway API.
234164
@@ -250,7 +180,10 @@ async def chat_completions(
250180
Returns:
251181
ChatCompletion: The chat completion response.
252182
"""
253-
endpoint = Endpoint("/llmgateway_/api/chat/completions")
183+
endpoint = EndpointManager.get_normalized_endpoint().format(
184+
model=model, api_version=api_version
185+
)
186+
endpoint = Endpoint("/" + endpoint)
254187

255188
request_body = {
256189
"messages": messages,

src/uipath/utils/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
from ._endpoints_manager import EndpointManager
2+
3+
__all__ = [
4+
"EndpointManager",
5+
]
src/uipath/utils/_endpoints_manager.py

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import logging
import os
from enum import Enum
from typing import Optional

logger = logging.getLogger(__name__)


class UiPathEndpoints(Enum):
    """URL templates for UiPath LLM endpoints.

    `AH_*` members point at the AgentHub service; the remaining members are
    the legacy LLM Gateway equivalents. Templates may contain `{model}` and
    `{api_version}` placeholders to be filled via `str.format`.
    """

    AH_NORMALIZED_COMPLETION_ENDPOINT = "agenthub_/llm/api/chat/completions"
    AH_PASSTHROUGH_COMPLETION_ENDPOINT = "agenthub_/llm/openai/deployments/{model}/chat/completions?api-version={api_version}"
    AH_EMBEDDING_ENDPOINT = (
        "agenthub_/llm/openai/deployments/{model}/embeddings?api-version={api_version}"
    )
    AH_CAPABILITIES_ENDPOINT = "agenthub_/llm/api/capabilities"

    NORMALIZED_COMPLETION_ENDPOINT = "llmgateway_/api/chat/completions"
    PASSTHROUGH_COMPLETION_ENDPOINT = "llmgateway_/openai/deployments/{model}/chat/completions?api-version={api_version}"
    EMBEDDING_ENDPOINT = (
        "llmgateway_/openai/deployments/{model}/embeddings?api-version={api_version}"
    )


class EndpointManager:
    """Manages and caches the UiPath endpoints.

    Selects AgentHub endpoint templates when the AgentHub capabilities probe
    succeeds, otherwise falls back to the legacy LLM Gateway templates. The
    probe result is cached for the lifetime of the process.
    """

    # Snapshot of UIPATH_URL taken at import time. NOTE(review): currently
    # unused — _check_agenthub re-reads the environment so a late-set
    # UIPATH_URL is still honored; kept for backward compatibility.
    _base_url = os.getenv("UIPATH_URL", "")
    # Tri-state cache: None = not probed yet, True/False = cached probe result.
    _agenthub_available: Optional[bool] = None

    @classmethod
    def is_agenthub_available(cls) -> bool:
        """Check if AgentHub is available and cache the result."""
        if cls._agenthub_available is None:
            cls._agenthub_available = cls._check_agenthub()
        return cls._agenthub_available

    @classmethod
    def _check_agenthub(cls) -> bool:
        """Perform the actual check for AgentHub capabilities.

        Returns:
            bool: True when the capabilities endpoint answers HTTP 200;
                False on any other status or on any error (network failure,
                missing/invalid UIPATH_URL, etc.).
        """
        try:
            # Imported lazily so this module can be imported (and the cached
            # flag seeded, e.g. in tests) without httpx being installed.
            import httpx

            with httpx.Client() as http_client:
                base_url = os.getenv("UIPATH_URL", "")
                capabilities_url = f"{base_url.rstrip('/')}/{UiPathEndpoints.AH_CAPABILITIES_ENDPOINT.value}"
                # Lazy %s args: formatting is skipped when DEBUG is disabled.
                logger.debug("Checking AgentHub capabilities at %s", capabilities_url)
                response = http_client.get(capabilities_url)
                return response.status_code == 200
        except Exception as e:
            # Best-effort probe: any failure means "AgentHub not available".
            logger.error("Error checking AgentHub capabilities: %s", e, exc_info=True)
            return False

    @classmethod
    def get_passthrough_endpoint(cls) -> str:
        """Return the passthrough (OpenAI-style) chat-completions URL template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_PASSTHROUGH_COMPLETION_ENDPOINT.value

        return UiPathEndpoints.PASSTHROUGH_COMPLETION_ENDPOINT.value

    @classmethod
    def get_normalized_endpoint(cls) -> str:
        """Return the normalized chat-completions URL template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_NORMALIZED_COMPLETION_ENDPOINT.value

        return UiPathEndpoints.NORMALIZED_COMPLETION_ENDPOINT.value

    @classmethod
    def get_embeddings_endpoint(cls) -> str:
        """Return the embeddings URL template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_EMBEDDING_ENDPOINT.value

        return UiPathEndpoints.EMBEDDING_ENDPOINT.value

tests/sdk/services/test_llm_integration.py

Lines changed: 0 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -74,20 +74,6 @@ async def test_embeddings_real(self, llm_service):
7474
assert hasattr(result, "usage")
7575
assert result.usage.prompt_tokens > 0
7676

77-
@pytest.mark.asyncio
78-
async def test_embeddings_usage_real(self, llm_service):
79-
"""Test the embeddings_usage function with a real API call."""
80-
input_text = "Testing the embedding usage endpoint."
81-
82-
# Make the actual API call
83-
result = await llm_service.embeddings_usage(input=input_text)
84-
85-
# Validate the response
86-
assert result is not None
87-
assert hasattr(result, "encoding")
88-
assert hasattr(result, "prompt_tokens")
89-
assert result.prompt_tokens > 0
90-
9177
@pytest.mark.asyncio
9278
async def test_chat_completions_real(self, llm_service):
9379
"""Test the chat_completions function with a real API call."""
@@ -115,31 +101,6 @@ async def test_chat_completions_real(self, llm_service):
115101
assert hasattr(result, "usage")
116102
assert result.usage.prompt_tokens > 0
117103

118-
@pytest.mark.asyncio
119-
async def test_chat_completions_usage_real(self, llm_service):
120-
"""Test the chat_completions_usage function with a real API call."""
121-
messages = [
122-
{"role": "system", "content": "You are a helpful assistant."},
123-
{"role": "user", "content": "What is the capital of France?"},
124-
]
125-
126-
# Make the actual API call
127-
result = await llm_service.chat_completions_usage(
128-
messages=messages,
129-
model=ChatModels.gpt_4o_mini_2024_07_18,
130-
max_tokens=50,
131-
temperature=0.7,
132-
)
133-
134-
# Validate the response
135-
assert result is not None
136-
assert hasattr(result, "encoding")
137-
assert hasattr(result, "prompt_tokens")
138-
assert result.prompt_tokens > 0
139-
assert isinstance(result.prompt_tokens, int)
140-
assert isinstance(result.encoding, str)
141-
assert len(result.encoding) > 0
142-
143104
@pytest.mark.asyncio
144105
async def test_embeddings_with_custom_model_real(self, llm_service):
145106
"""Test the embeddings function with a custom model."""

tests/sdk/services/test_llm_service.py

Lines changed: 0 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -35,26 +35,6 @@ def test_init(self, config, execution_context):
3535
assert service._config == config
3636
assert service._execution_context == execution_context
3737

38-
@patch.object(UiPathOpenAIService, "request_async")
39-
@pytest.mark.asyncio
40-
async def test_embeddings_usage(self, mock_request, openai_service):
41-
# Mock response
42-
mock_response = MagicMock()
43-
mock_response.json.return_value = {
44-
"encoding": "cl100k_base",
45-
"prompt_tokens": 4,
46-
}
47-
mock_request.return_value = mock_response
48-
49-
# Call the method
50-
result = await openai_service.embeddings_usage(input="Test input")
51-
52-
# Assertions
53-
mock_request.assert_called_once()
54-
assert isinstance(result, UsageInfo)
55-
assert result.encoding == "cl100k_base"
56-
assert result.prompt_tokens == 4
57-
5838
@patch.object(UiPathOpenAIService, "request_async")
5939
@pytest.mark.asyncio
6040
async def test_embeddings(self, mock_request, openai_service):
@@ -78,40 +58,6 @@ async def test_embeddings(self, mock_request, openai_service):
7858
assert result.model == "text-embedding-ada-002"
7959
assert result.usage.prompt_tokens == 4
8060

81-
@patch.object(UiPathOpenAIService, "request_async")
82-
@pytest.mark.asyncio
83-
async def test_chat_completions_usage(self, mock_request, openai_service):
84-
# Mock response
85-
mock_response = MagicMock()
86-
mock_response.json.return_value = {
87-
"encoding": "cl100k_base",
88-
"prompt_tokens": 10,
89-
}
90-
mock_request.return_value = mock_response
91-
92-
# Test messages
93-
messages = [
94-
{"role": "system", "content": "You are a helpful assistant"},
95-
{"role": "user", "content": "Hello"},
96-
]
97-
98-
# Call the method
99-
result = await openai_service.chat_completions_usage(
100-
messages=messages, max_tokens=50, temperature=0.5
101-
)
102-
103-
# Assertions
104-
mock_request.assert_called_once()
105-
assert isinstance(result, UsageInfo)
106-
assert result.encoding == "cl100k_base"
107-
assert result.prompt_tokens == 10
108-
109-
# Verify the correct endpoint was called
110-
args, kwargs = mock_request.call_args
111-
assert json.loads(kwargs["content"])["messages"] == messages
112-
assert json.loads(kwargs["content"])["max_tokens"] == 50
113-
assert json.loads(kwargs["content"])["temperature"] == 0.5
114-
11561
@patch.object(UiPathOpenAIService, "request_async")
11662
@pytest.mark.asyncio
11763
async def test_embeddings_with_custom_model(self, mock_request, openai_service):

0 commit comments

Comments
 (0)