|
| 1 | +"""Memory-aware coded agent. |
| 2 | +
|
| 3 | +Searches a UiPath Agent Memory space for prior interactions relevant to |
| 4 | +the user's query, stitches the LLMOps-rendered few-shot injection into |
| 5 | +the LLM system prompt, then calls the UiPath LLM Gateway. |
| 6 | +
|
| 7 | +Required environment variables: |
| 8 | + UIPATH_URL UiPath base URL (e.g. https://cloud.uipath.com/org/tenant) |
| 9 | + UIPATH_ACCESS_TOKEN Personal access token or service token |
| 10 | + UIPATH_FOLDER_KEY Folder that owns the memory space (or pass folder_path) |
| 11 | +
|
| 12 | +The memory space itself is created out-of-band (UI, CLI, or |
| 13 | +``uipath.memory.create``) and its id is passed in via ``AgentInput``. |
| 14 | +""" |
| 15 | + |
| 16 | +import os |
| 17 | + |
| 18 | +from dotenv import load_dotenv |
| 19 | +from pydantic import BaseModel, Field |
| 20 | + |
| 21 | +from uipath.platform import UiPath |
| 22 | +from uipath.platform.chat import ChatModels |
| 23 | +from uipath.platform.memory import ( |
| 24 | + MemorySearchRequest, |
| 25 | + SearchField, |
| 26 | + SearchMode, |
| 27 | + SearchSettings, |
| 28 | +) |
| 29 | +from uipath.tracing import traced |
| 30 | + |
# Load UIPATH_* settings from a local .env file, if one is present.
load_dotenv()

# Baseline system prompt; the memory search's few-shot injection is appended
# to this before the LLM call in main().
BASE_SYSTEM_PROMPT = "You answer questions concisely and accurately."
| 34 | + |
| 35 | + |
class AgentInput(BaseModel):
    """Input model for the memory-aware agent."""

    # Natural-language question to answer; also used as the memory search key.
    query: str = Field(description="User question")
    # The memory space is created out-of-band (see module docstring); only its
    # id is supplied here. It must live in the folder named by UIPATH_FOLDER_KEY.
    memory_space_id: str = Field(
        description="ID of the memory space to recall from (folder-scoped)"
    )
| 43 | + |
| 44 | + |
class AgentOutput(BaseModel):
    """Output model for the memory-aware agent."""

    # Final answer text from the LLM (or an error message on misconfiguration).
    response: str = Field(description="Final LLM answer")
    # How many prior interactions the memory search returned (0 when none).
    matched_memories: int = Field(
        default=0, description="Number of memories returned by the search"
    )
    # Echoed for observability: the exact few-shot text appended to the
    # system prompt, empty when no memories matched.
    system_prompt_injection: str = Field(
        default="", description="The few-shot block stitched into the system prompt"
    )
| 55 | + |
| 56 | + |
@traced()
async def main(input: AgentInput) -> AgentOutput:
    """Recall memories, augment the system prompt, then call the LLM.

    Steps:
      1. Search the memory space for prior interactions similar to
         ``input.query``.
      2. Append the LLMOps-rendered few-shot injection (if any) to the base
         system prompt.
      3. Call the UiPath LLM Gateway with the augmented prompt.

    Returns:
        AgentOutput with the answer, the number of matched memories, and the
        injection block actually used ("" when none).
    """
    base_url = os.environ.get("UIPATH_URL")
    access_token = os.environ.get("UIPATH_ACCESS_TOKEN")
    folder_key = os.environ.get("UIPATH_FOLDER_KEY")  # optional; may be None

    # Fail fast with an actionable message instead of a deep SDK auth error.
    if not base_url or not access_token:
        return AgentOutput(
            response=(
                "Missing required environment variables. "
                "Set UIPATH_URL and UIPATH_ACCESS_TOKEN."
            ),
        )

    sdk = UiPath()

    # 1. Recall relevant prior interactions for this query.
    search_req = MemorySearchRequest(
        fields=[SearchField(key_path=["query"], value=input.query)],
        settings=SearchSettings(
            threshold=0.5,  # drop weak matches
            result_count=3,  # keep the few-shot block small
            search_mode=SearchMode.Hybrid,  # presumably semantic + keyword — confirm against SDK docs
        ),
        definition_system_prompt=BASE_SYSTEM_PROMPT,
    )
    recall = await sdk.memory.search_async(
        memory_space_id=input.memory_space_id,
        request=search_req,
        folder_key=folder_key,
    )

    # 2. Stitch the LLMOps-rendered few-shot injection into the system prompt.
    # Normalize to "" up front: AgentOutput.system_prompt_injection is a plain
    # str field, so passing a None injection through would fail validation.
    injection = recall.system_prompt_injection or ""
    system_prompt = BASE_SYSTEM_PROMPT
    if injection:
        system_prompt = f"{BASE_SYSTEM_PROMPT}\n\n{injection}"

    # 3. Call the LLM with the augmented system prompt.
    chat = await sdk.llm.chat_completions(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": input.query},
        ],
        model=ChatModels.gpt_4_1_mini_2025_04_14,
        max_tokens=400,
        temperature=0.2,
    )

    # Guard against an empty choices list or null message content from the
    # gateway rather than raising IndexError.
    answer = ""
    if chat.choices:
        answer = chat.choices[0].message.content or ""

    return AgentOutput(
        response=answer,
        matched_memories=len(recall.results),
        system_prompt_injection=injection,
    )
0 commit comments