Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 4 additions & 8 deletions haystack/components/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,7 @@ def translate(

# The template variables 'language' and 'document' become inputs to the run method
result = agent.run(
messages=[],
language="French",
document="The weather is lovely today and the sun is shining.",
)
Expand Down Expand Up @@ -502,7 +503,7 @@ def _create_agent_span(self) -> Any:

def _initialize_fresh_execution(
self,
messages: list[ChatMessage] | None,
messages: list[ChatMessage],
streaming_callback: StreamingCallbackT | None,
requires_async: bool,
*,
Expand Down Expand Up @@ -532,11 +533,6 @@ def _initialize_fresh_execution(
"""
user_prompt = user_prompt or self.user_prompt
system_prompt = system_prompt or self.system_prompt
if messages is None and user_prompt is None and system_prompt is None:
raise ValueError(
"No messages provided to the Agent and neither user_prompt nor system_prompt is set. "
"Please provide at least one of these inputs."
)
messages = messages or []

if user_prompt is not None:
Expand Down Expand Up @@ -744,7 +740,7 @@ def _runtime_checks(self, break_point: AgentBreakpoint | None, tools: ToolsType)

def run( # noqa: PLR0915
self,
messages: list[ChatMessage] | None = None,
messages: list[ChatMessage],
streaming_callback: StreamingCallbackT | None = None,
*,
generation_kwargs: dict[str, Any] | None = None,
Expand Down Expand Up @@ -975,7 +971,7 @@ def run( # noqa: PLR0915

async def run_async( # noqa: PLR0915
self,
messages: list[ChatMessage] | None = None,
messages: list[ChatMessage],
streaming_callback: StreamingCallbackT | None = None,
*,
generation_kwargs: dict[str, Any] | None = None,
Expand Down
4 changes: 2 additions & 2 deletions haystack/components/generators/chat/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def run(
- "last_message": The last message exchanged during the LLM's run.
"""
return super(LLM, self).run( # noqa: UP008
messages=messages,
messages=messages or [],
streaming_callback=streaming_callback,
generation_kwargs=generation_kwargs,
system_prompt=system_prompt,
Expand Down Expand Up @@ -169,7 +169,7 @@ async def run_async(
- "last_message": The last message exchanged during the LLM's run.
"""
return await super(LLM, self).run_async( # noqa: UP008
messages=messages,
messages=messages or [],
streaming_callback=streaming_callback,
generation_kwargs=generation_kwargs,
system_prompt=system_prompt,
Expand Down
16 changes: 16 additions & 0 deletions releasenotes/notes/revert-agent-messages-e44f5005e48d371e.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
---
upgrade:
- |
``Agent.run()`` and ``Agent.run_async()`` now require ``messages`` as an explicit argument (no longer
optional). If you were relying on the default ``None`` value in Haystack version 2.26 or 2.27, pass an empty list instead:

.. code:: python

agent.run(messages=[], ...)

``LLM.run()`` and ``LLM.run_async()`` are unaffected — they still accept ``None`` and default to
an empty list internally.
fixes:
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I suggest we also include a note in the upgrade section, given that this is a breaking change for users who started to use Agent without messages in the meantime. I don't expect many users to be affected, if any at all.

- |
Reverts the change that made ``Agent`` messages optional as it caused issues with pipeline execution.
As a consequence, the ``LLM`` component now defaults to an empty messages list unless provided at runtime.
18 changes: 6 additions & 12 deletions test/components/agents/test_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1517,17 +1517,10 @@ def test_system_prompt_runtime_override(self, make_agent):
assert result["messages"][0].text == "You are an Haystack expert."
assert result["messages"][1].text == "Hi"

def test_user_prompt_raises_when_no_messages_and_no_prompt(self, weather_tool):
agent = Agent(chat_generator=MockChatGenerator(), tools=[weather_tool])
with pytest.raises(
ValueError, match="No messages provided to the Agent and neither user_prompt nor system_prompt is set"
):
agent.run()

def test_user_prompt_only_variables_forwarded_to_builder(self, make_agent):
agent = make_agent(user_prompt=_user_msg("Question: {{question}}"))
# 'irrelevant_kwarg' is not a template variable — must not raise
result = agent.run(question="Will it snow?", irrelevant_kwarg="unused")
result = agent.run(messages=[], question="Will it snow?", irrelevant_kwarg="unused")
assert "messages" in result

def test_user_prompt_with_template_variables(self, make_agent):
Expand All @@ -1538,7 +1531,7 @@ def test_user_prompt_with_template_variables(self, make_agent):
+ " on {{date}}?"
)
)
result = agent.run(name="Alice", cities=["Berlin", "Paris", "Rome"], date="2024-01-15")
result = agent.run(messages=[], name="Alice", cities=["Berlin", "Paris", "Rome"], date="2024-01-15")
user_messages = [m for m in result["messages"] if m.is_from(ChatRole.USER)]
assert user_messages[0].text == "Hello ALICE, check weather for: Berlin, Paris, Rome on 2024-01-15?"

Expand All @@ -1549,7 +1542,7 @@ def test_user_prompt_with_template_variables(self, make_agent):

def test_runtime_user_prompt_overrides_init_prompt(self, make_agent):
agent = make_agent(user_prompt=_user_msg("Default prompt for {{city}}."))
result = agent.run(user_prompt=_user_msg("Runtime prompt for {{city}}."), city="Berlin")
result = agent.run(messages=[], user_prompt=_user_msg("Runtime prompt for {{city}}."), city="Berlin")
user_messages = [m for m in result["messages"] if m.is_from(ChatRole.USER)]
assert user_messages[0].text == "Runtime prompt for Berlin."

Expand Down Expand Up @@ -1580,7 +1573,7 @@ def test_system_prompt_and_user_prompt(self, make_agent):
assert agent._system_chat_prompt_builder is not None
assert agent._user_chat_prompt_builder is not None

result = agent.run(project="Haystack", topic="pipelines")
result = agent.run(messages=[], project="Haystack", topic="pipelines")
messages = result["messages"]
assert messages[0].is_from(ChatRole.SYSTEM)
assert messages[0].text == "You help users of Haystack."
Expand Down Expand Up @@ -1629,7 +1622,7 @@ def _factory(user_prompt: str | None = None):
def test_rag_pipeline_user_prompt_init_only(self, make_rag_pipeline):
pipeline = make_rag_pipeline()
query = "Where is the Colosseum?"
result = pipeline.run(data={"retriever": {"query": query}, "agent": {"query": query}})
result = pipeline.run(data={"retriever": {"query": query}, "agent": {"query": query, "messages": []}})
assert "agent" in result
agent_output = result["agent"]
assert "messages" in agent_output
Expand Down Expand Up @@ -1662,6 +1655,7 @@ def test_rag_pipeline_user_prompt_runtime_override(self, make_rag_pipeline):
"Answer: {{query}}"
),
"query": query,
"messages": [],
},
}
)
Expand Down
Loading