Skip to content

Commit 6c822a4

Browse files
committed
chore: fix runtime lint
1 parent a2c018a commit 6c822a4

6 files changed

Lines changed: 136 additions & 123 deletions

File tree

src/uipath_llamaindex/_cli/_runtime/_hitl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ async def read(cls, resume_trigger: UiPathResumeTrigger) -> Optional[str]:
9090
return job.output_arguments
9191

9292
case UiPathResumeTriggerType.API:
93-
if resume_trigger.api_resume.inbox_id:
93+
if resume_trigger.api_resume and resume_trigger.api_resume.inbox_id:
9494
return await _get_api_payload(resume_trigger.api_resume.inbox_id)
9595

9696
case _:

src/uipath_llamaindex/_cli/_runtime/_runtime.py

Lines changed: 21 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,10 @@
1212
JsonPickleSerializer,
1313
)
1414
from llama_index.core.workflow.handler import WorkflowHandler
15-
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor, get_current_span
15+
from openinference.instrumentation.llama_index import (
16+
LlamaIndexInstrumentor,
17+
get_current_span,
18+
)
1619
from opentelemetry import trace
1720
from opentelemetry.sdk.trace import TracerProvider
1821
from opentelemetry.sdk.trace.export import BatchSpanProcessor
@@ -23,14 +26,13 @@
2326
UiPathRuntimeResult,
2427
UiPathRuntimeStatus,
2528
)
29+
from uipath.tracing import TracingManager
2630

2731
from .._tracing._oteladapter import LlamaIndexExporter
2832
from ._context import UiPathLlamaIndexRuntimeContext
2933
from ._exception import UiPathLlamaIndexRuntimeError
3034
from ._hitl import HitlProcessor, HitlReader
3135

32-
from uipath.tracing import TracingManager
33-
3436
logger = logging.getLogger(__name__)
3537

3638

@@ -74,14 +76,20 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
7476
if os.path.exists(self.state_file_path):
7577
os.remove(self.state_file_path)
7678

79+
if self.context.workflow is None:
80+
return None
81+
7782
start_event_class = self.context.workflow._start_event_class
78-
ev = start_event_class(**self.context.input_json)
83+
ev = start_event_class(**(self.context.input_json or {}))
7984
await self.load_workflow_context()
8085

86+
if self.context.workflow_context is None:
87+
return None
88+
8189
handler: WorkflowHandler = self.context.workflow.run(
8290
start_event=ev if self.context.resume else None,
8391
ctx=self.context.workflow_context,
84-
**self.context.input_json,
92+
**(self.context.input_json or {}),
8593
)
8694

8795
resume_trigger: Optional[UiPathResumeTrigger] = None
@@ -94,9 +102,10 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
94102
if self.context.resume and not response_applied:
95103
# If we are resuming, we need to apply the response to the event stream.
96104
response_applied = True
97-
self.context.workflow_context.send_event(
98-
await self.get_response_event()
99-
)
105+
response_event = await self.get_response_event()
106+
if response_event:
107+
# If we have a response event, send it to the workflow context.
108+
self.context.workflow_context.send_event(response_event)
100109
else:
101110
resume_trigger = await hitl_processor.create_resume_trigger()
102111
break
@@ -244,6 +253,9 @@ async def load_workflow_context(self):
244253
"""
245254
logger.debug(f"Resumed: {self.context.resume} Input: {self.context.input_json}")
246255

256+
if self.context.workflow is None:
257+
return
258+
247259
if not self.context.resume:
248260
self.context.workflow_context = Context(self.context.workflow)
249261
return
@@ -277,7 +289,7 @@ async def get_response_event(self) -> Optional[HumanResponseEvent]:
277289
"""
278290
if self.context.input_json:
279291
# If input_json is provided, use it to create a HumanResponseEvent
280-
return HumanResponseEvent(**self.context.input_json)
292+
return HumanResponseEvent(**(self.context.input_json or {}))
281293
# If resumed_trigger is set, fetch the feedback
282294
if self.context.resumed_trigger:
283295
feedback = await HitlReader.read(self.context.resumed_trigger)

src/uipath_llamaindex/_cli/cli_init.py

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
from typing import Any, Dict
66

77
from llama_index.core.workflow import StopEvent, Workflow
8-
from llama_index.core.workflow.drawing import StepConfig
8+
from llama_index.core.workflow.drawing import StepConfig # type: ignore
99
from llama_index.core.workflow.events import (
1010
HumanResponseEvent,
1111
InputRequiredEvent,
@@ -129,11 +129,13 @@ def draw_all_possible_flows_mermaid(
129129

130130
# Track event types to avoid duplicates
131131
event_types = {}
132+
current_stop_event = (
133+
None # Only one kind of `StopEvent` is allowed in a `Workflow`.
134+
)
135+
step_config: StepConfig | None = None
132136

133-
# Only one kind of `StopEvent` is allowed in a `Workflow`.
134-
current_stop_event = None
135137
for _, step_func in steps.items():
136-
step_config: StepConfig = getattr(step_func, "__step_config", None)
138+
step_config = getattr(step_func, "__step_config", None)
137139
if step_config is None:
138140
continue
139141

@@ -227,12 +229,13 @@ def draw_all_possible_flows_mermaid(
227229
event_id = f"event_{clean_id(event_name)}"
228230

229231
if step_name == "_done" and issubclass(event_type, StopEvent):
230-
stop_event_name = current_stop_event.__name__
231-
stop_event_id = f"event_{clean_id(stop_event_name)}"
232-
edge = f"{stop_event_id} --> {step_id}"
233-
if edge not in edges:
234-
edges.add(edge)
235-
mermaid_diagram.append(f" {edge}")
232+
if current_stop_event:
233+
stop_event_name = current_stop_event.__name__
234+
stop_event_id = f"event_{clean_id(stop_event_name)}"
235+
edge = f"{stop_event_id} --> {step_id}"
236+
if edge not in edges:
237+
edges.add(edge)
238+
mermaid_diagram.append(f" {edge}")
236239
else:
237240
edge = f"{event_id} --> {step_id}"
238241
if edge not in edges:

src/uipath_llamaindex/_cli/cli_run.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import asyncio
22
import logging
33
from os import environ as env
4-
from typing import Any, Dict, Optional
4+
from typing import Optional
55

66
from dotenv import load_dotenv
77
from uipath._cli._runtime._contracts import UiPathTraceContext
Lines changed: 46 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -1,48 +1,46 @@
1-
import os
2-
from enum import Enum
3-
from typing import Any, Union
4-
5-
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
6-
7-
8-
class OpenAIEmbeddingModel(Enum):
9-
TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
10-
TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
11-
12-
13-
class UiPathOpenAIEmbedding(AzureOpenAIEmbedding):
14-
def __init__(
15-
self,
16-
model: Union[
17-
str, OpenAIEmbeddingModel
18-
] = OpenAIEmbeddingModel.TEXT_EMBEDDING_ADA_002,
19-
api_version: str = "2024-10-21",
20-
**kwargs: Any,
21-
):
22-
default_headers_dict = {
23-
"X-UIPATH-STREAMING-ENABLED": "false",
24-
"X-UiPath-LlmGateway-RequestingProduct": "uipath-python-sdk",
25-
"X-UiPath-LlmGateway-RequestingFeature": "llama-index-agent",
26-
}
27-
28-
model_value = model.value if isinstance(model, OpenAIEmbeddingModel) else model
29-
30-
base_url = os.environ.get(
31-
"UIPATH_URL", "EMPTY"
32-
).rstrip("/")
33-
34-
if base_url == "EMPTY":
35-
raise ValueError(
36-
"UIPATH_URL environment variable is not set. Please run uipath auth."
37-
)
38-
39-
defaults = {
40-
"model": model_value,
41-
"deployment_name": model_value,
42-
"azure_endpoint": f"{base_url}/llmgateway_/",
43-
"api_key": os.environ.get("UIPATH_ACCESS_TOKEN"),
44-
"api_version": api_version,
45-
"default_headers": default_headers_dict,
46-
}
47-
final_kwargs = {**defaults, **kwargs}
48-
super().__init__(**final_kwargs)
1+
import os
2+
from enum import Enum
3+
from typing import Any, Union
4+
5+
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding # type: ignore
6+
7+
8+
class OpenAIEmbeddingModel(Enum):
    """Embedding models available through the UiPath LLM Gateway."""

    TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
    TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
11+
12+
13+
class UiPathOpenAIEmbedding(AzureOpenAIEmbedding):
    """Azure OpenAI embedding client preconfigured for the UiPath LLM Gateway.

    Resolves the gateway endpoint from the ``UIPATH_URL`` environment
    variable and authenticates with ``UIPATH_ACCESS_TOKEN``.
    """

    def __init__(
        self,
        model: Union[
            str, OpenAIEmbeddingModel
        ] = OpenAIEmbeddingModel.TEXT_EMBEDDING_ADA_002,
        api_version: str = "2024-10-21",
        **kwargs: Any,
    ):
        """Create the embedding client.

        Args:
            model: Embedding model, as an ``OpenAIEmbeddingModel`` member
                or its string value.
            api_version: Azure OpenAI API version to request.
            **kwargs: Extra arguments forwarded to ``AzureOpenAIEmbedding``;
                caller-supplied values override the computed defaults.

        Raises:
            ValueError: If ``UIPATH_URL`` is unset or empty.
        """
        # Headers identifying this SDK / feature to the UiPath LLM Gateway.
        default_headers_dict = {
            "X-UIPATH-STREAMING-ENABLED": "false",
            "X-UiPath-LlmGateway-RequestingProduct": "uipath-python-sdk",
            "X-UiPath-LlmGateway-RequestingFeature": "llama-index-agent",
        }

        model_value = model.value if isinstance(model, OpenAIEmbeddingModel) else model

        # Empty-string default instead of an "EMPTY" sentinel: this also
        # rejects a UIPATH_URL that is set but blank.
        base_url = os.environ.get("UIPATH_URL", "").rstrip("/")

        if not base_url:
            raise ValueError(
                "UIPATH_URL environment variable is not set. Please run uipath auth."
            )

        defaults = {
            "model": model_value,
            "deployment_name": model_value,
            "azure_endpoint": f"{base_url}/llmgateway_/",
            "api_key": os.environ.get("UIPATH_ACCESS_TOKEN"),
            "api_version": api_version,
            "default_headers": default_headers_dict,
        }
        # Explicit kwargs take precedence over the defaults above.
        final_kwargs = {**defaults, **kwargs}
        super().__init__(**final_kwargs)
Lines changed: 54 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -1,54 +1,54 @@
1-
import os
2-
from enum import Enum
3-
from typing import Any, Union
4-
5-
from llama_index.llms.azure_openai import AzureOpenAI
6-
7-
8-
class OpenAIModel(Enum):
9-
GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
10-
GPT_4_1_MINI_2025_04_14 = "gpt-4.1-mini-2025-04-14"
11-
GPT_4_1_NANO_2025_04_14 = "gpt-4.1-nano-2025-04-14"
12-
GPT_4O_2024_05_13 = "gpt-4o-2024-05-13"
13-
GPT_4O_2024_08_06 = "gpt-4o-2024-08-06"
14-
GPT_4O_2024_11_20 = "gpt-4o-2024-11-20"
15-
GPT_4O_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18"
16-
O3_MINI_2025_01_31 = "o3-mini-2025-01-31"
17-
TEXT_DAVINCI_003 = "text-davinci-003"
18-
19-
20-
# Define your custom AzureOpenAI class with default settings
21-
class UiPathOpenAI(AzureOpenAI):
22-
def __init__(
23-
self,
24-
model: Union[str, OpenAIModel] = OpenAIModel.GPT_4O_MINI_2024_07_18,
25-
api_version: str = "2024-10-21",
26-
**kwargs: Any,
27-
):
28-
default_headers_dict = {
29-
"X-UIPATH-STREAMING-ENABLED": "false",
30-
"X-UiPath-LlmGateway-RequestingProduct": "uipath-python-sdk",
31-
"X-UiPath-LlmGateway-RequestingFeature": "llama-index-agent",
32-
}
33-
model_value = model.value if isinstance(model, OpenAIModel) else model
34-
35-
base_url = os.environ.get(
36-
"UIPATH_URL", "EMPTY"
37-
).rstrip("/")
38-
39-
if base_url == "EMPTY":
40-
raise ValueError(
41-
"UIPATH_URL environment variable is not set. Please run uipath auth."
42-
)
43-
44-
defaults = {
45-
"model": model_value,
46-
"deployment_name": model_value,
47-
"azure_endpoint": f"{base_url}/llmgateway_/",
48-
"api_key": os.environ.get("UIPATH_ACCESS_TOKEN"),
49-
"api_version": api_version,
50-
"is_chat_model": True,
51-
"default_headers": default_headers_dict,
52-
}
53-
final_kwargs = {**defaults, **kwargs}
54-
super().__init__(**final_kwargs)
1+
import os
2+
from enum import Enum
3+
from typing import Any, Union
4+
5+
from llama_index.llms.azure_openai import AzureOpenAI # type: ignore
6+
7+
8+
class OpenAIModel(Enum):
    """Chat/completion models available through the UiPath LLM Gateway."""

    GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
    GPT_4_1_MINI_2025_04_14 = "gpt-4.1-mini-2025-04-14"
    GPT_4_1_NANO_2025_04_14 = "gpt-4.1-nano-2025-04-14"
    GPT_4O_2024_05_13 = "gpt-4o-2024-05-13"
    GPT_4O_2024_08_06 = "gpt-4o-2024-08-06"
    GPT_4O_2024_11_20 = "gpt-4o-2024-11-20"
    GPT_4O_MINI_2024_07_18 = "gpt-4o-mini-2024-07-18"
    O3_MINI_2025_01_31 = "o3-mini-2025-01-31"
    TEXT_DAVINCI_003 = "text-davinci-003"
18+
19+
20+
class UiPathOpenAI(AzureOpenAI):
    """Azure OpenAI chat client preconfigured for the UiPath LLM Gateway.

    Resolves the gateway endpoint from the ``UIPATH_URL`` environment
    variable and authenticates with ``UIPATH_ACCESS_TOKEN``.
    """

    def __init__(
        self,
        model: Union[str, OpenAIModel] = OpenAIModel.GPT_4O_MINI_2024_07_18,
        api_version: str = "2024-10-21",
        **kwargs: Any,
    ):
        """Create the LLM client.

        Args:
            model: Model to use, as an ``OpenAIModel`` member or its
                string value.
            api_version: Azure OpenAI API version to request.
            **kwargs: Extra arguments forwarded to ``AzureOpenAI``;
                caller-supplied values override the computed defaults.

        Raises:
            ValueError: If ``UIPATH_URL`` is unset or empty.
        """
        # Headers identifying this SDK / feature to the UiPath LLM Gateway.
        default_headers_dict = {
            "X-UIPATH-STREAMING-ENABLED": "false",
            "X-UiPath-LlmGateway-RequestingProduct": "uipath-python-sdk",
            "X-UiPath-LlmGateway-RequestingFeature": "llama-index-agent",
        }
        model_value = model.value if isinstance(model, OpenAIModel) else model

        # Empty-string default instead of an "EMPTY" sentinel: this also
        # rejects a UIPATH_URL that is set but blank. Single-line form keeps
        # this consistent with the embeddings module.
        base_url = os.environ.get("UIPATH_URL", "").rstrip("/")

        if not base_url:
            raise ValueError(
                "UIPATH_URL environment variable is not set. Please run uipath auth."
            )

        defaults = {
            "model": model_value,
            "deployment_name": model_value,
            "azure_endpoint": f"{base_url}/llmgateway_/",
            "api_key": os.environ.get("UIPATH_ACCESS_TOKEN"),
            "api_version": api_version,
            # The gateway serves chat-completion models.
            "is_chat_model": True,
            "default_headers": default_headers_dict,
        }
        # Explicit kwargs take precedence over the defaults above.
        final_kwargs = {**defaults, **kwargs}
        super().__init__(**final_kwargs)

0 commit comments

Comments
 (0)