|
import logging

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, MessagesState, StateGraph
from pydantic import BaseModel, Field

from uipath_langchain.agent.multimodal.invoke import llm_call_with_files
from uipath_langchain.agent.multimodal.types import FileInfo
from uipath_langchain.chat.chat_model_factory import get_chat_model
| 11 | + |
# Module-level logger; emits per-model/per-file progress during the test run.
logger = logging.getLogger(__name__)
| 13 | + |
# Model identifiers exercised by the multimodal smoke test — one per provider
# family (OpenAI, Google, Anthropic).
MODELS_TO_TEST = [
    "gpt-4.1-2025-04-14",
    "gemini-2.5-pro",
    "anthropic.claude-sonnet-4-5-20250929-v1:0",
]

# Publicly hosted sample attachments: one JPEG image and one PDF document,
# so both mime-type families are exercised against every model.
FILES_TO_TEST = [
    FileInfo(
        url="https://www.w3schools.com/css/img_5terre.jpg",
        name="img_5terre.jpg",
        mime_type="image/jpeg",
    ),
    FileInfo(
        url="https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
        name="dummy.pdf",
        mime_type="application/pdf",
    ),
]
| 32 | + |
| 33 | + |
class GraphInput(BaseModel):
    """Graph input schema: the prompt sent alongside each test file."""

    prompt: str = Field(default="Describe the content of this file in one sentence.")
| 36 | + |
| 37 | + |
class GraphOutput(BaseModel):
    """Graph output schema: overall pass/fail plus a readable per-model report."""

    success: bool  # True only when every (model, file) combination succeeded
    result_summary: str  # newline-joined per-model / per-file status report
| 41 | + |
| 42 | + |
class GraphState(MessagesState):
    """Shared graph state; extends MessagesState with the test-harness fields."""

    prompt: str  # user prompt forwarded to each model call
    success: bool  # aggregate result computed by run_multimodal_invoke
    result_summary: str  # formatted summary of per-model/per-file outcomes
    model_results: dict  # {model_name: {file_name: "✓" | "✗ <truncated error>"}}
| 48 | + |
| 49 | + |
async def run_multimodal_invoke(state: GraphState) -> dict:
    """Invoke every model in MODELS_TO_TEST against every file in FILES_TO_TEST.

    Each (model, file) combination is attempted independently: a failure is
    recorded in the results table but does not abort the remaining
    combinations.

    Args:
        state: Current graph state; only ``state["prompt"]`` is read.

    Returns:
        A partial state update with:
            success: True only if no combination raised.
            result_summary: human-readable per-model/per-file report.
            model_results: {model_name: {file_name: "✓" | "✗ <truncated error>"}}.
    """
    messages = [HumanMessage(content=state["prompt"])]
    model_results: dict[str, dict[str, str]] = {}

    for model_name in MODELS_TO_TEST:
        # Lazy %-style args: formatting is skipped entirely if INFO is disabled.
        logger.info("Testing %s...", model_name)
        model = get_chat_model(
            model=model_name,
            temperature=0.0,
            max_tokens=200,
            agenthub_config="agentsplayground",
        )
        test_results: dict[str, str] = {}
        for file_info in FILES_TO_TEST:
            label = file_info.name
            logger.info("  %s...", label)
            try:
                # The response content is not inspected; completing the call
                # without an exception counts as a pass for this combination.
                # (The original bound the result to an unused local.)
                await llm_call_with_files(messages, [file_info], model)
                logger.info("  %s: ✓", label)
                test_results[label] = "✓"
            except Exception as e:
                # Broad catch is deliberate: one provider failing must not
                # stop the sweep over the remaining models/files.
                logger.error("  %s: ✗ %s", label, e)
                test_results[label] = f"✗ {str(e)[:60]}"
        model_results[model_name] = test_results

    # Build the human-readable report and the aggregate pass/fail flag.
    summary_lines = []
    for model_name, results in model_results.items():
        summary_lines.append(f"{model_name}:")
        summary_lines.extend(
            f"  {file_name}: {result}" for file_name, result in results.items()
        )
    has_failures = any(
        "✗" in v for results in model_results.values() for v in results.values()
    )

    return {
        "success": not has_failures,
        "result_summary": "\n".join(summary_lines),
        "model_results": model_results,
    }
| 91 | + |
| 92 | + |
async def return_results(state: GraphState) -> GraphOutput:
    """Log the final outcome and project the state into the graph output schema.

    Args:
        state: Final graph state; reads ``success`` and ``result_summary``.

    Returns:
        GraphOutput carrying the aggregate flag and the formatted report.
    """
    # Lazy %-style args so formatting only happens when INFO is enabled.
    logger.info("Success: %s", state["success"])
    logger.info("Summary:\n%s", state["result_summary"])
    return GraphOutput(
        success=state["success"],
        result_summary=state["result_summary"],
    )
| 100 | + |
| 101 | + |
def build_graph():
    """Assemble and compile the linear test graph.

    Topology: START -> run_multimodal_invoke -> results -> END.

    Returns:
        The compiled, runnable graph. NOTE(review): the original annotated the
        return as ``StateGraph``, but ``compile()`` returns a compiled graph
        object rather than the builder, so the incorrect annotation is dropped.
    """
    builder = StateGraph(GraphState, input_schema=GraphInput, output_schema=GraphOutput)

    builder.add_node("run_multimodal_invoke", run_multimodal_invoke)
    builder.add_node("results", return_results)

    builder.add_edge(START, "run_multimodal_invoke")
    builder.add_edge("run_multimodal_invoke", "results")
    builder.add_edge("results", END)

    # MemorySaver keeps checkpoints in process memory only — adequate for a
    # throwaway test graph, not for persistence across runs.
    return builder.compile(checkpointer=MemorySaver())


# Module-level compiled graph, as expected by the runtime entry point.
graph = build_graph()