Skip to content

Commit bdcc717

Browse files
authored
fix: handle OpenAI model responses with tool calls and no other assistant content (strands-agents#1562)
1 parent 2d8c20e commit bdcc717

5 files changed

Lines changed: 190 additions & 6 deletions

File tree

src/strands/models/litellm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,7 @@ def format_request_messages(
194194
formatted_messages = cls._format_system_messages(system_prompt, system_prompt_content=system_prompt_content)
195195
formatted_messages.extend(cls._format_regular_messages(messages))
196196

197-
return [message for message in formatted_messages if message["content"] or "tool_calls" in message]
197+
return [message for message in formatted_messages if "content" in message or "tool_calls" in message]
198198

199199
@override
200200
def format_chunk(self, event: dict[str, Any], **kwargs: Any) -> StreamEvent:

src/strands/models/openai.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -204,10 +204,18 @@ def format_request_tool_message(cls, tool_result: ToolResult, **kwargs: Any) ->
204204
],
205205
)
206206

207+
formatted_contents = [cls.format_request_message_content(content) for content in contents]
208+
209+
# If single text content, use string format for better model compatibility
210+
if len(formatted_contents) == 1 and formatted_contents[0].get("type") == "text":
211+
content: str | list[dict[str, Any]] = formatted_contents[0]["text"]
212+
else:
213+
content = formatted_contents
214+
207215
return {
208216
"role": "tool",
209217
"tool_call_id": tool_result["toolUseId"],
210-
"content": [cls.format_request_message_content(content) for content in contents],
218+
"content": content,
211219
}
212220

213221
@classmethod
@@ -369,18 +377,21 @@ def _format_regular_messages(cls, messages: Messages, **kwargs: Any) -> list[dic
369377

370378
formatted_message = {
371379
"role": message["role"],
372-
"content": formatted_contents,
380+
**({"content": formatted_contents} if formatted_contents else {}),
373381
**({"tool_calls": formatted_tool_calls} if formatted_tool_calls else {}),
374382
}
375383
formatted_messages.append(formatted_message)
376384

377385
# Process tool messages to extract images into separate user messages
378386
# OpenAI API requires images to be in user role messages only
387+
# All tool messages must be grouped together before any user messages with images
388+
user_messages_with_images = []
379389
for tool_msg in formatted_tool_messages:
380390
tool_msg_clean, user_msg_with_images = cls._split_tool_message_images(tool_msg)
381391
formatted_messages.append(tool_msg_clean)
382392
if user_msg_with_images:
383-
formatted_messages.append(user_msg_with_images)
393+
user_messages_with_images.append(user_msg_with_images)
394+
formatted_messages.extend(user_messages_with_images)
384395

385396
return formatted_messages
386397

@@ -407,7 +418,7 @@ def format_request_messages(
407418
formatted_messages = cls._format_system_messages(system_prompt, system_prompt_content=system_prompt_content)
408419
formatted_messages.extend(cls._format_regular_messages(messages))
409420

410-
return [message for message in formatted_messages if message["content"] or "tool_calls" in message]
421+
return [message for message in formatted_messages if "content" in message or "tool_calls" in message]
411422

412423
def format_request(
413424
self,

tests/strands/models/test_litellm.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -812,3 +812,39 @@ def __init__(self, usage):
812812
assert metadata_events[0]["metadata"]["usage"]["inputTokens"] == 10
813813
assert metadata_events[0]["metadata"]["usage"]["outputTokens"] == 5
814814
assert metadata_events[0]["metadata"]["usage"]["totalTokens"] == 15
815+
816+
817+
def test_format_request_messages_with_tool_calls_no_content():
818+
"""Test that assistant messages with only tool calls are included and have no content field."""
819+
messages = [
820+
{"role": "user", "content": [{"text": "Use the calculator"}]},
821+
{
822+
"role": "assistant",
823+
"content": [
824+
{
825+
"toolUse": {
826+
"input": {"expression": "2+2"},
827+
"name": "calculator",
828+
"toolUseId": "c1",
829+
},
830+
},
831+
],
832+
},
833+
]
834+
835+
tru_result = LiteLLMModel.format_request_messages(messages)
836+
837+
exp_result = [
838+
{"role": "user", "content": [{"text": "Use the calculator", "type": "text"}]},
839+
{
840+
"role": "assistant",
841+
"tool_calls": [
842+
{
843+
"function": {"arguments": '{"expression": "2+2"}', "name": "calculator"},
844+
"id": "c1",
845+
"type": "function",
846+
}
847+
],
848+
},
849+
]
850+
assert tru_result == exp_result

tests/strands/models/test_openai.py

Lines changed: 137 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -180,6 +180,23 @@ def test_format_request_tool_message():
180180
assert tru_result == exp_result
181181

182182

183+
def test_format_request_tool_message_single_text_returns_string():
184+
"""Test that single text content is returned as string for model compatibility."""
185+
tool_result = {
186+
"content": [{"text": '{"result": "success"}'}],
187+
"status": "success",
188+
"toolUseId": "c1",
189+
}
190+
191+
tru_result = OpenAIModel.format_request_tool_message(tool_result)
192+
exp_result = {
193+
"content": '{"result": "success"}',
194+
"role": "tool",
195+
"tool_call_id": "c1",
196+
}
197+
assert tru_result == exp_result
198+
199+
183200
def test_split_tool_message_images_with_image():
184201
"""Test that images are extracted from tool messages."""
185202
tool_message = {
@@ -441,7 +458,7 @@ def test_format_request_messages(system_prompt):
441458
],
442459
},
443460
{
444-
"content": [{"text": "4", "type": "text"}],
461+
"content": "4",
445462
"role": "tool",
446463
"tool_call_id": "c1",
447464
},
@@ -1397,3 +1414,122 @@ def test_format_request_filters_location_source_document(model, caplog):
13971414
assert len(formatted_content) == 1
13981415
assert formatted_content[0]["type"] == "text"
13991416
assert "Location sources are not supported by OpenAI" in caplog.text
1417+
1418+
1419+
def test_format_request_messages_with_tool_calls_no_content():
1420+
"""Test that assistant messages with only tool calls are included and have no content field."""
1421+
messages = [
1422+
{"role": "user", "content": [{"text": "Use the calculator"}]},
1423+
{
1424+
"role": "assistant",
1425+
"content": [
1426+
{
1427+
"toolUse": {
1428+
"input": {"expression": "2+2"},
1429+
"name": "calculator",
1430+
"toolUseId": "c1",
1431+
},
1432+
},
1433+
],
1434+
},
1435+
]
1436+
1437+
tru_result = OpenAIModel.format_request_messages(messages)
1438+
1439+
exp_result = [
1440+
{"role": "user", "content": [{"text": "Use the calculator", "type": "text"}]},
1441+
{
1442+
"role": "assistant",
1443+
"tool_calls": [
1444+
{
1445+
"function": {"arguments": '{"expression": "2+2"}', "name": "calculator"},
1446+
"id": "c1",
1447+
"type": "function",
1448+
}
1449+
],
1450+
},
1451+
]
1452+
assert tru_result == exp_result
1453+
1454+
1455+
def test_format_request_messages_multiple_tool_calls_with_images():
1456+
"""Test that multiple tool calls with image results are formatted correctly.
1457+
1458+
OpenAI requires all tool response messages to immediately follow the assistant
1459+
message with tool_calls, before any other messages. When tools return images,
1460+
the images are moved to user messages, but these must come after ALL tool messages.
1461+
"""
1462+
messages = [
1463+
{"role": "user", "content": [{"text": "Run the tools"}]},
1464+
{
1465+
"role": "assistant",
1466+
"content": [
1467+
{"toolUse": {"input": {}, "name": "tool1", "toolUseId": "call_1"}},
1468+
{"toolUse": {"input": {}, "name": "tool2", "toolUseId": "call_2"}},
1469+
],
1470+
},
1471+
{
1472+
"role": "user",
1473+
"content": [
1474+
{
1475+
"toolResult": {
1476+
"toolUseId": "call_1",
1477+
"content": [{"image": {"format": "png", "source": {"bytes": b"img1"}}}],
1478+
"status": "success",
1479+
}
1480+
},
1481+
{
1482+
"toolResult": {
1483+
"toolUseId": "call_2",
1484+
"content": [{"image": {"format": "png", "source": {"bytes": b"img2"}}}],
1485+
"status": "success",
1486+
}
1487+
},
1488+
],
1489+
},
1490+
]
1491+
1492+
tru_result = OpenAIModel.format_request_messages(messages)
1493+
1494+
image_placeholder = (
1495+
"Tool successfully returned an image. The image is being provided in the following user message."
1496+
)
1497+
exp_result = [
1498+
{"role": "user", "content": [{"text": "Run the tools", "type": "text"}]},
1499+
{
1500+
"role": "assistant",
1501+
"tool_calls": [
1502+
{"function": {"arguments": "{}", "name": "tool1"}, "id": "call_1", "type": "function"},
1503+
{"function": {"arguments": "{}", "name": "tool2"}, "id": "call_2", "type": "function"},
1504+
],
1505+
},
1506+
{
1507+
"role": "tool",
1508+
"tool_call_id": "call_1",
1509+
"content": [{"type": "text", "text": image_placeholder}],
1510+
},
1511+
{
1512+
"role": "tool",
1513+
"tool_call_id": "call_2",
1514+
"content": [{"type": "text", "text": image_placeholder}],
1515+
},
1516+
{
1517+
"role": "user",
1518+
"content": [
1519+
{
1520+
"image_url": {"detail": "auto", "format": "image/png", "url": "data:image/png;base64,aW1nMQ=="},
1521+
"type": "image_url",
1522+
}
1523+
],
1524+
},
1525+
{
1526+
"role": "user",
1527+
"content": [
1528+
{
1529+
"image_url": {"detail": "auto", "format": "image/png", "url": "data:image/png;base64,aW1nMg=="},
1530+
"type": "image_url",
1531+
}
1532+
],
1533+
},
1534+
]
1535+
assert tru_result == exp_result

tests_integ/test_multiagent_swarm.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,7 @@ def capture_first_node(self, event):
113113
return VerifyHook()
114114

115115

116+
@pytest.mark.timeout(120)
116117
def test_swarm_execution_with_string(researcher_agent, analyst_agent, writer_agent, hook_provider):
117118
"""Test swarm execution with string input."""
118119
# Create the swarm

0 commit comments

Comments (0)