Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "uipath-langchain"
version = "0.4.25"
version = "0.4.26"
description = "Python SDK that enables developers to build and deploy LangGraph agents to the UiPath Cloud Platform"
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
"""Multimodal LLM input handling (images, PDFs, etc.)."""

from .invoke import build_file_content_block, llm_call_with_files
from .invoke import (
build_file_content_block,
llm_call_with_files,
)
from .types import IMAGE_MIME_TYPES, FileInfo
from .utils import download_file_base64, is_image, is_pdf, sanitize_filename

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""LLM invocation with multimodal file attachments."""

import asyncio
from typing import Any

from langchain_core.language_models import BaseChatModel
Expand Down Expand Up @@ -43,6 +44,24 @@ async def build_file_content_block(
raise ValueError(f"Unsupported mime_type={file_info.mime_type}")


async def build_file_content_blocks(files: list[FileInfo]) -> list[DataContentBlock]:
    """Convert file attachments into multimodal content blocks.

    Each file is converted concurrently via ``build_file_content_block``;
    ``asyncio.gather`` preserves input order in its result list.

    Args:
        files: File descriptors to convert.

    Returns:
        One ``DataContentBlock`` per input file, in the same order.
    """
    # Fast path: nothing to convert, skip scheduling any tasks.
    if not files:
        return []

    blocks: list[DataContentBlock] = await asyncio.gather(
        *(build_file_content_block(info) for info in files)
    )
    return blocks


async def llm_call_with_files(
messages: list[AnyMessage],
files: list[FileInfo],
Expand Down
2 changes: 1 addition & 1 deletion src/uipath_langchain/agent/react/llm_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from uipath_langchain.agent.tools.structured_tool_with_argument_properties import (
StructuredToolWithArgumentProperties,
)
from uipath_langchain.llm import get_payload_handler
from uipath_langchain.chat.handlers import get_payload_handler

from ..exceptions import AgentTerminationException
from .constants import (
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,16 @@
import asyncio
import uuid
from typing import Any
from typing import Any, cast

from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage
from langchain_core.messages import (
AnyMessage,
BaseMessage,
ContentBlock,
DataContentBlock,
HumanMessage,
SystemMessage,
)
from langchain_core.messages.tool import ToolCall
from langchain_core.tools import BaseTool, StructuredTool
from uipath.agent.models.agent import (
Expand All @@ -11,18 +19,20 @@
from uipath.eval.mocks import mockable
from uipath.platform import UiPath

from uipath_langchain.agent.multimodal import FileInfo, build_file_content_block
from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model
from uipath_langchain.agent.react.multimodal import FileInfo, llm_call_with_files
from uipath_langchain.agent.react.types import AgentGraphState
from uipath_langchain.agent.tools.static_args import handle_static_args
from uipath_langchain.agent.tools.structured_tool_with_argument_properties import (
StructuredToolWithArgumentProperties,
)
from uipath_langchain.agent.tools.tool_node import (
ToolWrapperReturnType,
)
from uipath_langchain.agent.tools.tool_node import ToolWrapperReturnType
from uipath_langchain.agent.tools.utils import sanitize_tool_name
from uipath_langchain.agent.wrappers import get_job_attachment_wrapper
from uipath_langchain.chat.helpers import (
append_content_blocks_to_message,
extract_text_content,
)

ANALYZE_FILES_SYSTEM_MESSAGE = (
"Process the provided files to complete the given task. "
Expand Down Expand Up @@ -50,8 +60,8 @@ async def tool_fn(**kwargs: Any):
if "attachments" not in kwargs:
raise ValueError("Argument 'attachments' is not available")

analysisTask = kwargs["analysisTask"]
if not analysisTask:
analysis_task = kwargs["analysisTask"]
if not analysis_task:
raise ValueError("Argument 'analysisTask' is not available")

attachments = kwargs["attachments"]
Expand All @@ -60,12 +70,17 @@ async def tool_fn(**kwargs: Any):
if not files:
return {"analysisResult": "No attachments provided to analyze."}

human_message = HumanMessage(content=analysis_task)
human_message_with_files = await add_files_to_message(human_message, files)

messages: list[AnyMessage] = [
SystemMessage(content=ANALYZE_FILES_SYSTEM_MESSAGE),
HumanMessage(content=analysisTask),
cast(AnyMessage, human_message_with_files),
]
result = await llm_call_with_files(messages, files, llm)
return result
result = await llm.ainvoke(messages)

analysis_result = extract_text_content(result)
return analysis_result

job_attachment_wrapper = get_job_attachment_wrapper(output_type=output_model)

Expand Down Expand Up @@ -125,3 +140,27 @@ async def _resolve_job_attachment_arguments(
file_infos.append(file_info)

return file_infos


async def add_files_to_message(
    message: BaseMessage,
    files: list[FileInfo],
) -> BaseMessage:
    """Return *message* extended with one content block per attachment.

    File conversions run concurrently; ``asyncio.gather`` keeps the
    resulting blocks in the same order as ``files``.

    Args:
        message: The message to add files to (any ``BaseMessage`` subclass).
        files: File attachments to append.

    Returns:
        A new message of the same type with the file content blocks
        appended, or the original message unchanged when ``files`` is empty.
    """
    # No attachments: hand back the original message untouched.
    if not files:
        return message

    blocks = await asyncio.gather(
        *(build_file_content_block(info) for info in files)
    )
    # gather() yields DataContentBlock items; widen to ContentBlock for
    # the generic append helper.
    return append_content_blocks_to_message(
        message, cast(list[ContentBlock], list(blocks))
    )
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def _create_openai_llm(
agenthub_config: str,
byo_connection_id: str | None = None,
) -> BaseChatModel:
"""Create UiPathChatOpenAI for OpenAI models via passthrough."""
"""Create UiPathChatOpenAI for OpenAI models via LLMGateway."""
from uipath_langchain.chat.openai import UiPathChatOpenAI

azure_open_ai_latest_api_version = "2025-04-01-preview"
Expand Down Expand Up @@ -70,7 +70,7 @@ def _create_bedrock_llm(
agenthub_config: str,
byo_connection_id: str | None = None,
) -> BaseChatModel:
"""Create UiPathChatBedrockConverse for Claude models via passthrough."""
"""Create UiPathChatBedrockConverse for Claude models via LLMGateway."""
from uipath_langchain.chat.bedrock import (
UiPathChatBedrock,
UiPathChatBedrockConverse,
Expand Down Expand Up @@ -107,7 +107,7 @@ def _create_vertex_llm(
agenthub_config: str,
byo_connection_id: str | None = None,
) -> BaseChatModel:
"""Create UiPathChatVertex for Gemini models via passthrough."""
"""Create UiPathChatVertex for Gemini models via LLMGateway."""
from uipath_langchain.chat.vertex import UiPathChatVertex

match api_flavor:
Expand Down Expand Up @@ -185,7 +185,7 @@ def get_chat_model(
agenthub_config: str,
byo_connection_id: str | None = None,
) -> BaseChatModel:
"""Create and configure LLM instance using passthrough API.
"""Create and configure LLM instance using LLMGateway API.

Fetches available models from the discovery API and selects the appropriate
LLM class based on the apiFlavor field from the matching model configuration.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from .base import ModelPayloadHandler
from .bedrock_converse import BedrockConversePayloadHandler
from .bedrock_invoke import BedrockInvokePayloadHandler
from .handler_factory import get_payload_handler
from .openai_completions import OpenAICompletionsPayloadHandler
from .openai_responses import OpenAIResponsesPayloadHandler
from .vertex_gemini import VertexGeminiPayloadHandler
Expand All @@ -14,4 +15,5 @@
"OpenAICompletionsPayloadHandler",
"OpenAIResponsesPayloadHandler",
"VertexGeminiPayloadHandler",
"get_payload_handler",
]
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,12 @@
UiPathPassthroughChatModel,
)

from .handlers import (
BedrockConversePayloadHandler,
BedrockInvokePayloadHandler,
ModelPayloadHandler,
OpenAICompletionsPayloadHandler,
OpenAIResponsesPayloadHandler,
VertexGeminiPayloadHandler,
)
from .base import ModelPayloadHandler
from .bedrock_converse import BedrockConversePayloadHandler
from .bedrock_invoke import BedrockInvokePayloadHandler
from .openai_completions import OpenAICompletionsPayloadHandler
from .openai_responses import OpenAIResponsesPayloadHandler
from .vertex_gemini import VertexGeminiPayloadHandler

_HANDLER_REGISTRY: dict[tuple[LLMProvider, APIFlavor], type[ModelPayloadHandler]] = {
(LLMProvider.OPENAI, APIFlavor.OPENAI_COMPLETIONS): OpenAICompletionsPayloadHandler,
Expand Down
8 changes: 8 additions & 0 deletions src/uipath_langchain/chat/helpers/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
"""Message helper utilities."""

from .helpers import append_content_blocks_to_message, extract_text_content

__all__ = [
"append_content_blocks_to_message",
"extract_text_content",
]
62 changes: 62 additions & 0 deletions src/uipath_langchain/chat/helpers/helpers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
"""Helper functions for chat messages manipulation."""

from typing import Any, cast

from langchain_core.messages import BaseMessage, ContentBlock


def append_content_blocks_to_message(
    message: BaseMessage,
    content_blocks: list[ContentBlock],
) -> BaseMessage:
    """Append content blocks to a message.

    Builds a new message rather than mutating the input: the existing
    blocks are copied, the new ones appended, and a fresh instance of the
    same concrete message class is constructed from the combined list.

    Args:
        message: The original message (any BaseMessage subclass)
        content_blocks: Content blocks to append

    Returns:
        New message of the same type with appended content blocks
    """
    # Nothing to append: return the original instance as-is (no copy).
    if not content_blocks:
        return message

    # Copy so the input message's block list is never mutated.
    existing_content_blocks = list(message.content_blocks)
    existing_content_blocks.extend(content_blocks)

    # NOTE(review): constructing via type(message)(content_blocks=...)
    # presumably carries over only the content — other fields such as
    # id, name, additional_kwargs, or tool call data look like they are
    # dropped, and message types with required constructor arguments
    # (e.g. a tool message's tool_call_id) may raise. Confirm against
    # the langchain-core message constructors; model_copy may be safer.
    return type(message)(content_blocks=existing_content_blocks)


def extract_text_content(message: BaseMessage) -> str:
    """Extract text content from an AI message.

    Handles both plain string blocks and structured blocks, reading the
    text either from a dict's ``"text"`` key or from a ``text`` attribute
    (duck typing), ignoring non-string or empty text values.

    Args:
        message: The AI message to extract text from

    Returns:
        Extracted text content, with multiple text parts joined by newlines
    """
    parts: list[str] = []

    for block in message.content_blocks:
        # Bare string blocks are taken verbatim (even empty strings).
        if isinstance(block, str):
            parts.append(block)
            continue

        value: Any = None
        if isinstance(block, dict):
            value = cast(dict[str, Any], block).get("text", "")
        elif hasattr(block, "text"):
            value = getattr(block, "text", "")

        # Structured blocks contribute only non-empty string text.
        if isinstance(value, str) and value:
            parts.append(value)

    return "\n".join(parts)
12 changes: 0 additions & 12 deletions src/uipath_langchain/llm/__init__.py

This file was deleted.

23 changes: 0 additions & 23 deletions src/uipath_langchain/llm/utils.py

This file was deleted.

Loading