Skip to content

Commit 8c82ec2

Browse files
feat: add integration test for files (#702)
1 parent 7d80927 commit 8c82ec2

File tree

6 files changed

+174
-0
lines changed

6 files changed

+174
-0
lines changed
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
{
2+
"prompt": "Describe the content of this file in one sentence."
3+
}
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
2+
"dependencies": ["."],
3+
"graphs": {
4+
"agent": "./src/main.py:graph"
5+
}
6+
}
Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
[project]
name = "multimodal-invoke"
version = "0.0.1"
description = "Test multimodal LLM invoke with file attachments via get_chat_model"
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
# Runtime dependencies for the integration-test agent; the vertex/bedrock
# extras pull in the provider SDKs exercised by the test.
dependencies = [
    "langgraph>=0.2.70",
    "langchain-core>=0.3.34",
    "langgraph-checkpoint-sqlite>=2.0.3",
    "uipath-langchain[vertex,bedrock]",
    "pydantic>=2.10.6",
]
requires-python = ">=3.11"

# Resolve uipath-langchain from the repository root (two levels up) so the
# test runs against the local checkout instead of a published release.
[tool.uv.sources]
uipath-langchain = { path = "../../", editable = true }

testcases/multimodal-invoke/run.sh

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
#!/bin/bash
2+
set -e
3+
4+
echo "Syncing dependencies..."
5+
uv sync
6+
7+
echo "Authenticating with UiPath..."
8+
uv run uipath auth --client-id="$CLIENT_ID" --client-secret="$CLIENT_SECRET" --base-url="$BASE_URL"
9+
10+
echo "Initializing the project..."
11+
uv run uipath init
12+
13+
echo "Packing agent..."
14+
uv run uipath pack
15+
16+
echo "Running agent..."
17+
uv run uipath run agent --file input.json
18+
19+
echo "Running agent again with empty UIPATH_JOB_KEY..."
20+
export UIPATH_JOB_KEY=""
21+
uv run uipath run agent --trace-file .uipath/traces.jsonl --file input.json >> local_run_output.log
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import json


def check_output(output_data: dict) -> tuple:
    """Extract the success flag and summary text from the agent's output payload.

    Args:
        output_data: Parsed contents of ``__uipath/output.json``; must contain
            an ``"output"`` mapping with ``"success"`` and ``"result_summary"``.

    Returns:
        A ``(success, result_summary)`` tuple.

    Raises:
        KeyError: If a required key is missing from the payload.
    """
    output_content = output_data["output"]
    return output_content["success"], output_content["result_summary"]


def main() -> None:
    """Read the agent output file and fail the process if the run did not succeed."""
    with open("__uipath/output.json", "r", encoding="utf-8") as f:
        output_data = json.load(f)

    success, result_summary = check_output(output_data)

    print(f"Success: {success}")
    print(f"Summary:\n{result_summary}")

    # An explicit raise (not `assert`) so the check survives `python -O`
    # and exits nonzero for the calling CI script.
    if success is not True:
        raise SystemExit("Test did not succeed. See summary above.")


if __name__ == "__main__":
    main()
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
import logging
2+
3+
from langchain_core.messages import AIMessage, HumanMessage
4+
from langgraph.checkpoint.memory import MemorySaver
5+
from langgraph.graph import END, START, StateGraph, MessagesState
6+
from pydantic import BaseModel, Field
7+
8+
from uipath_langchain.agent.multimodal.invoke import llm_call_with_files
9+
from uipath_langchain.agent.multimodal.types import FileInfo
10+
from uipath_langchain.chat.chat_model_factory import get_chat_model
11+
12+
logger = logging.getLogger(__name__)
13+
14+
# Model identifiers exercised by the test — one per provider route
# (OpenAI GPT, Google Gemini, Anthropic Claude via Bedrock-style id).
MODELS_TO_TEST = [
    "gpt-4.1-2025-04-14",
    "gemini-2.5-pro",
    "anthropic.claude-sonnet-4-5-20250929-v1:0",
]

# Publicly reachable sample attachments: one image and one PDF, covering
# both MIME families the multimodal invoke path must handle.
FILES_TO_TEST = [
    FileInfo(
        url="https://www.w3schools.com/css/img_5terre.jpg",
        name="img_5terre.jpg",
        mime_type="image/jpeg",
    ),
    FileInfo(
        url="https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
        name="dummy.pdf",
        mime_type="application/pdf",
    ),
]
32+
33+
34+
class GraphInput(BaseModel):
    """Graph input schema: the prompt sent alongside every test file."""

    prompt: str = Field(default="Describe the content of this file in one sentence.")
36+
37+
38+
class GraphOutput(BaseModel):
    """Final graph output: overall pass/fail plus a human-readable report."""

    # True only when every model/file combination succeeded
    success: bool
    # multi-line report, one line per model and per file result
    result_summary: str
41+
42+
43+
class GraphState(MessagesState):
    """State threaded between graph nodes (extends LangGraph's MessagesState)."""

    # prompt sent with each model/file combination
    prompt: str
    # True when no combination failed
    success: bool
    # human-readable multi-line report of all results
    result_summary: str
    # raw results: model name -> {file name -> "✓" or "✗ <error prefix>"}
    model_results: dict
48+
49+
50+
async def run_multimodal_invoke(state: GraphState) -> dict:
    """Invoke every model in MODELS_TO_TEST against every file in FILES_TO_TEST.

    Each (model, file) combination is attempted independently: a failure is
    recorded in the results but does not abort the remaining combinations.

    Args:
        state: Graph state carrying the user ``prompt``.

    Returns:
        State update with ``success`` (True when nothing failed),
        ``result_summary`` (human-readable report), and ``model_results``
        (raw per-model, per-file marks).
    """
    messages = [HumanMessage(content=state["prompt"])]
    model_results: dict = {}

    for model_name in MODELS_TO_TEST:
        # Lazy %-style args so the message is only formatted when emitted.
        logger.info("Testing %s...", model_name)
        model = get_chat_model(
            model=model_name,
            temperature=0.0,
            max_tokens=200,
            agenthub_config="agentsplayground",
        )
        test_results: dict = {}
        for file_info in FILES_TO_TEST:
            label = file_info.name
            logger.info("  %s...", label)
            try:
                # Only success/failure matters here; the response content is
                # not inspected, so the return value is deliberately dropped.
                await llm_call_with_files(messages, [file_info], model)
                logger.info("  %s: ✓", label)
                test_results[label] = "✓"
            except Exception as e:
                # Broad catch is deliberate: any provider/transport error is a
                # recorded test failure for this combination, not a crash.
                logger.error("  %s: ✗ %s", label, e)
                test_results[label] = f"✗ {str(e)[:60]}"
        model_results[model_name] = test_results

    summary_lines = []
    for model_name, results in model_results.items():
        summary_lines.append(f"{model_name}:")
        for file_name, result in results.items():
            summary_lines.append(f"  {file_name}: {result}")
    has_failures = any(
        "✗" in v for results in model_results.values() for v in results.values()
    )

    return {
        "success": not has_failures,
        "result_summary": "\n".join(summary_lines),
        "model_results": model_results,
    }
91+
92+
93+
async def return_results(state: GraphState) -> GraphOutput:
    """Log the aggregated outcome and emit the final graph output."""
    success = state["success"]
    summary = state["result_summary"]
    logger.info(f"Success: {success}")
    logger.info(f"Summary:\n{summary}")
    return GraphOutput(success=success, result_summary=summary)
100+
101+
102+
def build_graph() -> StateGraph:
    """Assemble and compile the two-node test graph with in-memory checkpointing."""
    workflow = StateGraph(GraphState, input_schema=GraphInput, output_schema=GraphOutput)

    workflow.add_node("run_multimodal_invoke", run_multimodal_invoke)
    workflow.add_node("results", return_results)

    workflow.add_edge(START, "run_multimodal_invoke")
    workflow.add_edge("run_multimodal_invoke", "results")
    workflow.add_edge("results", END)

    return workflow.compile(checkpointer=MemorySaver())


graph = build_graph()

0 commit comments

Comments
 (0)