Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/strands/models/llamaapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ def _format_request_messages(self, messages: Messages, system_prompt: str | None
# Filter out location sources and unsupported block types
filtered_contents = []
for content in contents:
if any(block_type in content for block_type in ["toolResult", "toolUse"]):
if any(block_type in content for block_type in ["toolResult", "toolUse", "reasoningContent"]):
continue
if _has_location_source(content):
logger.warning("Location sources are not supported by LlamaAPI | skipping content block")
Expand Down
2 changes: 1 addition & 1 deletion src/strands/models/llamacpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,7 @@ def _format_messages(self, messages: Messages, system_prompt: str | None = None)
# Filter out location sources and unsupported block types
filtered_contents = []
for content in contents:
if any(block_type in content for block_type in ["toolResult", "toolUse"]):
if any(block_type in content for block_type in ["toolResult", "toolUse", "reasoningContent"]):
continue
if _has_location_source(content):
logger.warning("Location sources are not supported by llama.cpp | skipping content block")
Expand Down
4 changes: 2 additions & 2 deletions src/strands/models/writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def _format_content_vision(content: ContentBlock) -> dict[str, Any]:
return [
_format_content_vision(content)
for content in contents
if not any(block_type in content for block_type in ["toolResult", "toolUse"])
if not any(block_type in content for block_type in ["toolResult", "toolUse", "reasoningContent"])
]

def _format_request_message_contents(self, contents: list[ContentBlock]) -> str:
Expand All @@ -142,7 +142,7 @@ def _format_content(content: ContentBlock) -> str:
content_blocks = list(
filter(
lambda content: content.get("text")
and not any(block_type in content for block_type in ["toolResult", "toolUse"]),
and not any(block_type in content for block_type in ["toolResult", "toolUse", "reasoningContent"]),
contents,
)
)
Expand Down
20 changes: 20 additions & 0 deletions tests/strands/models/test_llamaapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -241,6 +241,26 @@ def test_format_request_with_unsupported_type(model):
model.format_request(messages)


def test_format_request_skips_reasoning_content(model):
    """Verify reasoningContent blocks produced by other providers are dropped without error."""
    messages = [
        {
            "role": "user",
            "content": [
                {"text": "Hello"},
                {"reasoningContent": {"reasoningText": {"text": "Let me think...", "signature": "sig"}}},
            ],
        },
    ]

    # format_request must succeed: the reasoningContent block is silently skipped.
    request = model.format_request(messages)

    # Only the plain text block survives into the formatted user message.
    formatted_user = [msg for msg in request["messages"] if msg["role"] == "user"]
    assert len(formatted_user) == 1
    assert formatted_user[0]["content"] == [{"type": "text", "text": "Hello"}]


def test_format_chunk_message_start(model):
event = {"chunk_type": "message_start"}

Expand Down
22 changes: 22 additions & 0 deletions tests/strands/models/test_llamacpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -706,3 +706,25 @@ def test_format_request_filters_location_source_document(caplog) -> None:
assert len(user_content) == 1
assert user_content[0]["type"] == "text"
assert "Location sources are not supported by llama.cpp" in caplog.text


def test_format_request_skips_reasoning_content() -> None:
    """Verify reasoningContent blocks produced by other providers are dropped without error."""
    model = LlamaCppModel()

    messages = [
        {
            "role": "user",
            "content": [
                {"text": "Hello"},
                {"reasoningContent": {"reasoningText": {"text": "Let me think...", "signature": "sig"}}},
            ],
        },
    ]

    # _format_request must succeed: the reasoningContent block is silently skipped.
    request = model._format_request(messages)

    # Only the plain text block survives into the formatted user message.
    formatted_user = [msg for msg in request["messages"] if msg["role"] == "user"]
    assert len(formatted_user) == 1
    assert formatted_user[0]["content"] == [{"type": "text", "text": "Hello"}]
17 changes: 16 additions & 1 deletion tests/strands/models/test_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,6 @@ def test_format_request_with_empty_content(model, model_id, stream_options):
[
({"video": {}}, "video"),
({"document": {}}, "document"),
({"reasoningContent": {}}, "reasoningContent"),
({"other": {}}, "other"),
],
)
Expand All @@ -266,6 +265,22 @@ def test_format_request_with_unsupported_type(model, content, content_type):
model.format_request(messages)


def test_format_request_skips_reasoning_content(model):
    """Test that reasoningContent blocks from other providers are silently skipped.

    The sibling tests for llamaapi/llamacpp assert on the formatted output;
    this one previously only checked that no exception was raised, so a
    regression that stopped filtering reasoningContent would still pass.
    We now also verify the reasoning text does not leak into the request.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"text": "Hello"},
                {"reasoningContent": {"reasoningText": {"text": "Let me think...", "signature": "sig"}}},
            ],
        },
    ]

    # Should not raise — reasoningContent is silently dropped
    request = model.format_request(messages)

    # The reasoning text must be absent from the serialized request, while the
    # user's text block is preserved. (String containment keeps the assertion
    # independent of Writer's exact message-content shape.)
    serialized = str(request)
    assert "Let me think..." not in serialized
    assert "Hello" in serialized


class AsyncStreamWrapper:
def __init__(self, items: list[Any]):
self.items = items
Expand Down