Skip to content

Commit 15279f1

Browse files
feat: conversational agent support
1 parent 7a9bbb5 commit 15279f1

File tree

9 files changed

+141
-62
lines changed

9 files changed

+141
-62
lines changed

src/uipath_langchain/agent/react/agent.py

Lines changed: 25 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from .router import (
2626
create_route_agent,
2727
)
28+
from .router_conversational import create_route_agent_conversational
2829
from .terminate_node import (
2930
create_terminate_node,
3031
)
@@ -74,16 +75,18 @@ def create_agent(
7475
os.environ["LANGCHAIN_RECURSION_LIMIT"] = str(config.recursion_limit)
7576

7677
agent_tools = list(tools)
77-
flow_control_tools: list[BaseTool] = create_flow_control_tools(output_schema)
78+
flow_control_tools: list[BaseTool] = (
79+
[] if config.is_conversational else create_flow_control_tools(output_schema)
80+
)
7881
llm_tools: list[BaseTool] = [*agent_tools, *flow_control_tools]
7982

80-
init_node = create_init_node(messages, input_schema)
83+
init_node = create_init_node(messages, input_schema, config.is_conversational)
8184

8285
tool_nodes = create_tool_node(agent_tools)
8386
tool_nodes_with_guardrails = create_tools_guardrails_subgraph(
8487
tool_nodes, guardrails
8588
)
86-
terminate_node = create_terminate_node(output_schema)
89+
terminate_node = create_terminate_node(output_schema, config.is_conversational)
8790

8891
CompleteAgentGraphState = create_state_with_input(
8992
input_schema if input_schema is not None else BaseModel
@@ -109,19 +112,35 @@ def create_agent(
109112

110113
builder.add_edge(START, AgentGraphNode.INIT)
111114

112-
llm_node = create_llm_node(model, llm_tools, config.thinking_messages_limit)
115+
llm_node = create_llm_node(
116+
model, llm_tools, config.thinking_messages_limit, config.is_conversational
117+
)
113118
llm_with_guardrails_subgraph = create_llm_guardrails_subgraph(
114119
(AgentGraphNode.LLM, llm_node), guardrails
115120
)
116121
builder.add_node(AgentGraphNode.AGENT, llm_with_guardrails_subgraph)
117122
builder.add_edge(AgentGraphNode.INIT, AgentGraphNode.AGENT)
118123

119124
tool_node_names = list(tool_nodes_with_guardrails.keys())
120-
route_agent = create_route_agent(config.thinking_messages_limit)
125+
126+
if config.is_conversational:
127+
route_agent = create_route_agent_conversational()
128+
target_node_names = [
129+
*tool_node_names,
130+
AgentGraphNode.TERMINATE,
131+
]
132+
else:
133+
route_agent = create_route_agent(config.thinking_messages_limit)
134+
target_node_names = [
135+
AgentGraphNode.AGENT,
136+
*tool_node_names,
137+
AgentGraphNode.TERMINATE,
138+
]
139+
121140
builder.add_conditional_edges(
122141
AgentGraphNode.AGENT,
123142
route_agent,
124-
[AgentGraphNode.AGENT, *tool_node_names, AgentGraphNode.TERMINATE],
143+
target_node_names,
125144
)
126145

127146
for tool_name in tool_node_names:

src/uipath_langchain/agent/react/init_node.py

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from typing import Any, Callable, Sequence
44

55
from langchain_core.messages import HumanMessage, SystemMessage
6+
from langgraph.types import Overwrite
67
from pydantic import BaseModel
78

89
from .job_attachments import (
@@ -14,12 +15,26 @@ def create_init_node(
1415
messages: Sequence[SystemMessage | HumanMessage]
1516
| Callable[[Any], Sequence[SystemMessage | HumanMessage]],
1617
input_schema: type[BaseModel] | None,
18+
is_conversational: bool = False,
1719
):
1820
def graph_state_init(state: Any) -> Any:
21+
resolved_messages: Sequence[SystemMessage | HumanMessage] | Overwrite
1922
if callable(messages):
20-
resolved_messages = messages(state)
23+
resolved_messages = list(messages(state))
2124
else:
22-
resolved_messages = messages
25+
resolved_messages = list(messages)
26+
if is_conversational:
27+
# For conversational agents we need to reorder the messages so that the system message is first, followed by
28+
# the initial user message. When resuming the conversation, the state will have the entire message history,
29+
# including the system message. In this case, we need to replace the system message from the state with the
30+
# newly generated one. It will have the current date/time and reflect any changes to user settings. The add
31+
# reducer is used for the messages property in the state, so by default new messages are appended to the end
32+
# and using Overwrite will cause LangGraph to replace the entire array instead.
33+
if len(state.messages) > 0 and isinstance(state.messages[0], SystemMessage):
34+
preserved_messages = state.messages[1:]
35+
else:
36+
preserved_messages = state.messages
37+
resolved_messages = Overwrite([*resolved_messages, *preserved_messages])
2338

2439
schema = input_schema if input_schema is not None else BaseModel
2540
job_attachments = get_job_attachments(schema, state)
@@ -28,7 +43,7 @@ def graph_state_init(state: Any) -> Any:
2843
}
2944

3045
return {
31-
"messages": list(resolved_messages),
46+
"messages": resolved_messages,
3247
"inner_state": {
3348
"job_attachments": job_attachments_dict,
3449
},

src/uipath_langchain/agent/react/llm_node.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ def create_llm_node(
3737
model: BaseChatModel,
3838
tools: Sequence[BaseTool] | None = None,
3939
thinking_messages_limit: int = MAX_CONSECUTIVE_THINKING_MESSAGES,
40+
is_conversational: bool = False,
4041
):
4142
"""Create LLM node with dynamic tool_choice enforcement.
4243
@@ -58,7 +59,11 @@ async def llm_node(state: AgentGraphState):
5859

5960
consecutive_thinking_messages = count_consecutive_thinking_messages(messages)
6061

61-
if bindable_tools and consecutive_thinking_messages >= thinking_messages_limit:
62+
if (
63+
not is_conversational
64+
and bindable_tools
65+
and consecutive_thinking_messages >= thinking_messages_limit
66+
):
6267
llm = base_llm.bind(tool_choice=tool_choice_required_value)
6368
else:
6469
llm = base_llm

src/uipath_langchain/agent/react/router.py

Lines changed: 3 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,11 @@
22

33
from typing import Literal
44

5-
from langchain_core.messages import AIMessage, AnyMessage, ToolCall
5+
from langchain_core.messages import ToolCall
66
from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
77

88
from ..exceptions import AgentNodeRoutingException
9+
from .router_utils import validate_last_message_is_AI
910
from .types import AgentGraphNode, AgentGraphState
1011
from .utils import count_consecutive_thinking_messages
1112

@@ -27,26 +28,6 @@ def __has_control_flow_tool(tool_calls: list[ToolCall]) -> bool:
2728
return any(tc.get("name") in FLOW_CONTROL_TOOLS for tc in tool_calls)
2829

2930

30-
def __validate_last_message_is_AI(messages: list[AnyMessage]) -> AIMessage:
31-
"""Validate and return last message from state.
32-
33-
Raises:
34-
AgentNodeRoutingException: If messages are empty or last message is not AIMessage
35-
"""
36-
if not messages:
37-
raise AgentNodeRoutingException(
38-
"No messages in state - cannot route after agent"
39-
)
40-
41-
last_message = messages[-1]
42-
if not isinstance(last_message, AIMessage):
43-
raise AgentNodeRoutingException(
44-
f"Last message is not AIMessage (type: {type(last_message).__name__}) - cannot route after agent"
45-
)
46-
47-
return last_message
48-
49-
5031
def create_route_agent(thinking_messages_limit: int = 0):
5132
"""Create a routing function configured with thinking_messages_limit.
5233
@@ -77,7 +58,7 @@ def route_agent(
7758
AgentNodeRoutingException: When encountering unexpected state (empty messages, non-AIMessage, or excessive completions)
7859
"""
7960
messages = state.messages
80-
last_message = __validate_last_message_is_AI(messages)
61+
last_message = validate_last_message_is_AI(messages)
8162

8263
tool_calls = list(last_message.tool_calls) if last_message.tool_calls else []
8364
tool_calls = __filter_control_flow_tool_calls(tool_calls)
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
"""Routing functions for conditional edges in the agent graph."""
2+
3+
import logging
4+
from typing import Literal
5+
6+
from uipath_langchain.agent.react.router_utils import validate_last_message_is_AI
7+
8+
from .types import AgentGraphNode, AgentGraphState
9+
10+
logger = logging.getLogger(__name__)
11+
12+
13+
def create_route_agent_conversational():
    """Build the post-agent router used by conversational agent graphs.

    The conversational loop alternates between the LLM and its tools: while
    the latest AI message carries tool calls, control fans out to the
    corresponding tool nodes (parallel execution); once a response arrives
    with no tool calls, the graph routes to the terminate node.

    Returns:
        A routing function suitable for LangGraph conditional edges.
    """

    def route_agent_conversational(
        state: AgentGraphState,
    ) -> list[str] | Literal[AgentGraphNode.TERMINATE]:
        """Route after the agent node in a conversational graph.

        Routing logic:
            - Tool calls present: return the list of tool node names so the
              requested tools run (possibly in parallel).
            - No tool calls: route to the terminate node.

        Returns:
            - list[str]: Tool node names, one per tool call on the last message.
            - AgentGraphNode.TERMINATE: When the last AI message has no tool calls.

        Raises:
            AgentNodeRoutingException: If the state has no messages or the
                last message is not an AIMessage.
        """
        last_message = validate_last_message_is_AI(state.messages)
        calls = last_message.tool_calls
        if not calls:
            return AgentGraphNode.TERMINATE
        return [call["name"] for call in calls]

    return route_agent_conversational
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
"""Routing functions for conditional edges in the agent graph."""
2+
3+
from langchain_core.messages import AIMessage, AnyMessage
4+
5+
from ..exceptions import AgentNodeRoutingException
6+
7+
8+
def validate_last_message_is_AI(messages: list[AnyMessage]) -> AIMessage:
    """Return the trailing message, guaranteeing it is an AIMessage.

    Raises:
        AgentNodeRoutingException: If *messages* is empty, or the final
            entry is not an AIMessage.
    """
    if not messages:
        raise AgentNodeRoutingException(
            "No messages in state - cannot route after agent"
        )

    tail = messages[-1]
    if isinstance(tail, AIMessage):
        return tail

    raise AgentNodeRoutingException(
        f"Last message is not AIMessage (type: {type(tail).__name__}) - cannot route after agent"
    )

src/uipath_langchain/agent/react/terminate_node.py

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def _handle_agent_termination(termination: AgentTermination) -> NoReturn:
4646

4747

4848
def create_terminate_node(
49-
response_schema: type[BaseModel] | None = None,
49+
response_schema: type[BaseModel] | None = None, is_conversational: bool = False
5050
):
5151
"""Handles Agent Graph termination for multiple sources and output or error propagation to Orchestrator.
5252
@@ -60,23 +60,24 @@ def terminate_node(state: AgentGraphState):
6060
if state.inner_state.termination:
6161
_handle_agent_termination(state.inner_state.termination)
6262

63-
last_message = state.messages[-1]
64-
if not isinstance(last_message, AIMessage):
65-
raise AgentNodeRoutingException(
66-
f"Expected last message to be AIMessage, got {type(last_message).__name__}"
67-
)
63+
if not is_conversational:
64+
last_message = state.messages[-1]
65+
if not isinstance(last_message, AIMessage):
66+
raise AgentNodeRoutingException(
67+
f"Expected last message to be AIMessage, got {type(last_message).__name__}"
68+
)
6869

69-
for tool_call in last_message.tool_calls:
70-
tool_name = tool_call["name"]
70+
for tool_call in last_message.tool_calls:
71+
tool_name = tool_call["name"]
7172

72-
if tool_name == END_EXECUTION_TOOL.name:
73-
return _handle_end_execution(tool_call["args"], response_schema)
73+
if tool_name == END_EXECUTION_TOOL.name:
74+
return _handle_end_execution(tool_call["args"], response_schema)
7475

75-
if tool_name == RAISE_ERROR_TOOL.name:
76-
_handle_raise_error(tool_call["args"])
76+
if tool_name == RAISE_ERROR_TOOL.name:
77+
_handle_raise_error(tool_call["args"])
7778

78-
raise AgentNodeRoutingException(
79-
"No control flow tool call found in terminate node. Unexpected state."
80-
)
79+
raise AgentNodeRoutingException(
80+
"No control flow tool call found in terminate node. Unexpected state."
81+
)
8182

8283
return terminate_node

src/uipath_langchain/agent/react/types.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,3 +61,6 @@ class AgentGraphConfig(BaseModel):
6161
ge=0,
6262
description="Max consecutive thinking messages before enforcing tool usage. 0 = force tools every time.",
6363
)
64+
is_conversational: bool = Field(
65+
default=False, description="If set, creates a graph for conversational agents"
66+
)

src/uipath_langchain/chat/mapper.py

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -307,21 +307,8 @@ def map_event(
307307
)
308308
]
309309

310-
# --- Fallback for other BaseMessage types ---
311-
text_content = self._extract_text(message.content)
312-
return [
313-
UiPathConversationMessageEvent(
314-
message_id=message.id,
315-
start=UiPathConversationMessageStartEvent(
316-
role="assistant", timestamp=timestamp
317-
),
318-
content_part=UiPathConversationContentPartEvent(
319-
content_part_id=f"cp-{message.id}",
320-
chunk=UiPathConversationContentPartChunkEvent(data=text_content),
321-
),
322-
end=UiPathConversationMessageEndEvent(),
323-
)
324-
]
310+
# Don't send events for system or user messages. Agent messages are handled above.
311+
return []
325312

326313

327314
__all__ = ["UiPathChatMessagesMapper"]

0 commit comments

Comments
 (0)