Skip to content

Commit dd7a7d9

Browse files
authored
feat: add metadata field to messages for stateful context tracking (#2125)
1 parent 09902bd commit dd7a7d9

15 files changed

Lines changed: 288 additions & 32 deletions

src/strands/agent/agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1025,7 +1025,7 @@ async def _convert_prompt_to_messages(self, prompt: AgentInput) -> Messages:
10251025
# Check if all items in the input list are dictionaries
10261026
elif all(isinstance(item, dict) for item in prompt):
10271027
# Check if all items are messages
1028-
if all(all(key in item for key in Message.__annotations__.keys()) for item in prompt):
1028+
if all(all(key in item for key in Message.__required_keys__) for item in prompt):
10291029
# Messages input - add all messages to conversation
10301030
messages = cast(Messages, prompt)
10311031

src/strands/event_loop/_recover_message_on_max_tokens_reached.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,4 +68,7 @@ def recover_message_on_max_tokens_reached(message: Message) -> Message:
6868
}
6969
)
7070

71-
return {"content": valid_content, "role": message["role"]}
71+
recovered: Message = {"content": valid_content, "role": message["role"]}
72+
if "metadata" in message:
73+
recovered["metadata"] = message["metadata"]
74+
return recovered

src/strands/event_loop/event_loop.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -354,6 +354,13 @@ async def _handle_model_execution(
354354
stop_reason, message, usage, metrics = event["stop"]
355355
invocation_state.setdefault("request_state", {})
356356

357+
# Attach metadata to the assistant message immediately so it's
358+
# available to all downstream consumers (hooks, events, state).
359+
message["metadata"] = {
360+
"usage": usage,
361+
"metrics": metrics,
362+
}
363+
357364
after_model_call_event = AfterModelCallEvent(
358365
agent=agent,
359366
invocation_state=invocation_state,

src/strands/event_loop/streaming.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -488,6 +488,9 @@ async def stream_messages(
488488
logger.debug("model=<%s> | streaming messages", model)
489489

490490
messages = _normalize_messages(messages)
491+
# Whitelist only role and content before sending to the model provider.
492+
# This ensures metadata (and any future non-model fields) never leak to providers.
493+
messages = [Message(role=msg["role"], content=msg["content"]) for msg in messages]
491494
start_time = time.time()
492495

493496
chunks = model.stream(

src/strands/telemetry/tracer.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -527,9 +527,7 @@ def start_event_loop_cycle_span(
527527
event_loop_cycle_id = str(invocation_state.get("event_loop_cycle_id"))
528528
parent_span = parent_span if parent_span else invocation_state.get("event_loop_parent_span")
529529

530-
attributes: dict[str, AttributeValue] = self._get_common_attributes(
531-
operation_name="execute_event_loop_cycle"
532-
)
530+
attributes: dict[str, AttributeValue] = self._get_common_attributes(operation_name="execute_event_loop_cycle")
533531
attributes["event_loop.cycle_id"] = event_loop_cycle_id
534532

535533
if custom_trace_attributes:

src/strands/types/content.py

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,12 @@
66
- Bedrock docs: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_Types_Amazon_Bedrock_Runtime.html
77
"""
88

9-
from typing import Literal
9+
from typing import Any, Literal
1010

1111
from typing_extensions import NotRequired, TypedDict
1212

1313
from .citations import CitationsContentBlock
14+
from .event_loop import Metrics, Usage
1415
from .media import DocumentContent, ImageContent, VideoContent
1516
from .tools import ToolResult, ToolUse
1617

@@ -177,17 +178,44 @@ class ContentBlockStop(TypedDict):
177178
"""
178179

179180

181+
class MessageMetadata(TypedDict, total=False):
182+
"""Optional metadata attached to a message.
183+
184+
Not sent to model providers — explicitly stripped before model calls.
185+
Persisted alongside the message in session storage.
186+
187+
Attributes:
188+
usage: Token usage information from the model response.
189+
metrics: Performance metrics from the model response.
190+
custom: Arbitrary user/framework metadata (e.g. compression provenance).
191+
"""
192+
193+
usage: Usage
194+
metrics: Metrics
195+
custom: dict[str, Any]
196+
197+
180198
class Message(TypedDict):
181199
"""A message in a conversation with the agent.
182200
183201
Attributes:
184202
content: The message content.
185203
role: The role of the message sender.
204+
metadata: Optional metadata, stripped before model calls.
186205
"""
187206

188207
content: list[ContentBlock]
189208
role: Role
209+
metadata: NotRequired[MessageMetadata]
190210

191211

192212
Messages = list[Message]
193213
"""A list of messages representing a conversation."""
214+
215+
216+
def get_message_metadata(message: Message) -> MessageMetadata:
217+
"""Get metadata for a message, returning an empty dict if not present.
218+
219+
Individual fields (usage, metrics, custom) may not be present. Use .get() to safely access them.
220+
"""
221+
return message.get("metadata", {})

tests/strands/agent/hooks/test_agent_events.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ async def test_stream_e2e_success(alist):
147147
{"toolUse": {"input": {}, "name": "normal_tool", "toolUseId": "123"}},
148148
],
149149
"role": "assistant",
150+
"metadata": ANY,
150151
}
151152
},
152153
{
@@ -205,6 +206,7 @@ async def test_stream_e2e_success(alist):
205206
{"toolUse": {"input": {}, "name": "async_tool", "toolUseId": "1234"}},
206207
],
207208
"role": "assistant",
209+
"metadata": ANY,
208210
}
209211
},
210212
{
@@ -263,6 +265,7 @@ async def test_stream_e2e_success(alist):
263265
{"toolUse": {"input": {}, "name": "streaming_tool", "toolUseId": "12345"}},
264266
],
265267
"role": "assistant",
268+
"metadata": ANY,
266269
}
267270
},
268271
{
@@ -307,11 +310,11 @@ async def test_stream_e2e_success(alist):
307310
},
308311
{"event": {"contentBlockStop": {}}},
309312
{"event": {"messageStop": {"stopReason": "end_turn"}}},
310-
{"message": {"content": [{"text": "I invoked the tools!"}], "role": "assistant"}},
313+
{"message": {"content": [{"text": "I invoked the tools!"}], "role": "assistant", "metadata": ANY}},
311314
{
312315
"result": AgentResult(
313316
stop_reason="end_turn",
314-
message={"content": [{"text": "I invoked the tools!"}], "role": "assistant"},
317+
message={"content": [{"text": "I invoked the tools!"}], "role": "assistant", "metadata": ANY},
315318
metrics=ANY,
316319
state={},
317320
),
@@ -371,11 +374,11 @@ async def test_stream_e2e_throttle_and_redact(alist, mock_sleep):
371374
},
372375
{"event": {"contentBlockStop": {}}},
373376
{"event": {"messageStop": {"stopReason": "guardrail_intervened"}}},
374-
{"message": {"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant"}},
377+
{"message": {"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant", "metadata": ANY}},
375378
{
376379
"result": AgentResult(
377380
stop_reason="guardrail_intervened",
378-
message={"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant"},
381+
message={"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant", "metadata": ANY},
379382
metrics=ANY,
380383
state={},
381384
),
@@ -442,6 +445,7 @@ async def test_stream_e2e_reasoning_redacted_content(alist):
442445
{"text": "Response with redacted reasoning"},
443446
],
444447
"role": "assistant",
448+
"metadata": ANY,
445449
}
446450
},
447451
{
@@ -453,6 +457,7 @@ async def test_stream_e2e_reasoning_redacted_content(alist):
453457
{"text": "Response with redacted reasoning"},
454458
],
455459
"role": "assistant",
460+
"metadata": ANY,
456461
},
457462
metrics=ANY,
458463
state={},

tests/strands/agent/test_agent.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -336,7 +336,7 @@ def test_agent__call__(
336336
"stop_reason": result.stop_reason,
337337
}
338338
exp_result = {
339-
"message": {"content": [{"text": "test text"}], "role": "assistant"},
339+
"message": {"content": [{"text": "test text"}], "role": "assistant", "metadata": unittest.mock.ANY},
340340
"state": {},
341341
"stop_reason": "end_turn",
342342
}
@@ -781,6 +781,7 @@ def test_agent__call__callback(mock_model, agent, callback_handler, agenerator):
781781
{"reasoningContent": {"reasoningText": {"text": "value", "signature": "value"}}},
782782
{"text": "value"},
783783
],
784+
"metadata": unittest.mock.ANY,
784785
},
785786
),
786787
unittest.mock.call(
@@ -793,6 +794,7 @@ def test_agent__call__callback(mock_model, agent, callback_handler, agenerator):
793794
{"reasoningContent": {"reasoningText": {"text": "value", "signature": "value"}}},
794795
{"text": "value"},
795796
],
797+
"metadata": unittest.mock.ANY,
796798
},
797799
metrics=unittest.mock.ANY,
798800
state={},
@@ -817,7 +819,7 @@ async def test_agent__call__in_async_context(mock_model, agent, agenerator):
817819
result = agent("test")
818820

819821
tru_message = result.message
820-
exp_message = {"content": [{"text": "abc"}], "role": "assistant"}
822+
exp_message = {"content": [{"text": "abc"}], "role": "assistant", "metadata": unittest.mock.ANY}
821823
assert tru_message == exp_message
822824

823825

@@ -837,7 +839,7 @@ async def test_agent_invoke_async(mock_model, agent, agenerator):
837839
result = await agent.invoke_async("test")
838840

839841
tru_message = result.message
840-
exp_message = {"content": [{"text": "abc"}], "role": "assistant"}
842+
exp_message = {"content": [{"text": "abc"}], "role": "assistant", "metadata": unittest.mock.ANY}
841843
assert tru_message == exp_message
842844

843845

@@ -1128,7 +1130,7 @@ async def test_stream_async_multi_modal_input(mock_model, agent, agenerator, ali
11281130
tru_message = agent.messages
11291131
exp_message = [
11301132
{"content": prompt, "role": "user"},
1131-
{"content": [{"text": "I see text and an image"}], "role": "assistant"},
1133+
{"content": [{"text": "I see text and an image"}], "role": "assistant", "metadata": unittest.mock.ANY},
11321134
]
11331135
assert tru_message == exp_message
11341136

@@ -1966,7 +1968,11 @@ def shell(command: str):
19661968
}
19671969

19681970
# And that it continued to the LLM call
1969-
assert agent.messages[-1] == {"content": [{"text": "I invoked a tool!"}], "role": "assistant"}
1971+
assert agent.messages[-1] == {
1972+
"content": [{"text": "I invoked a tool!"}],
1973+
"role": "assistant",
1974+
"metadata": unittest.mock.ANY,
1975+
}
19701976

19711977

19721978
def test_agent_string_system_prompt():

tests/strands/agent/test_agent_cancellation.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import asyncio
44
import threading
5+
from unittest.mock import ANY
56

67
import pytest
78

@@ -31,7 +32,7 @@ async def test_agent_cancel_before_invocation():
3132
result = await agent.invoke_async("Hello")
3233

3334
assert result.stop_reason == "cancelled"
34-
assert result.message == {"role": "assistant", "content": [{"text": "Cancelled by user"}]}
35+
assert result.message == {"role": "assistant", "content": [{"text": "Cancelled by user"}], "metadata": ANY}
3536

3637

3738
@pytest.mark.asyncio

tests/strands/agent/test_agent_hooks.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,7 @@ def test_agent__call__hooks(agent, hook_provider, agent_tool, mock_model, tool_u
173173
message={
174174
"content": [{"toolUse": tool_use}],
175175
"role": "assistant",
176+
"metadata": ANY,
176177
},
177178
stop_reason="tool_use",
178179
),
@@ -199,7 +200,7 @@ def test_agent__call__hooks(agent, hook_provider, agent_tool, mock_model, tool_u
199200
agent=agent,
200201
invocation_state=ANY,
201202
stop_response=AfterModelCallEvent.ModelStopResponse(
202-
message=mock_model.agent_responses[1],
203+
message={"role": "assistant", "content": [{"text": "I invoked a tool!"}], "metadata": ANY},
203204
stop_reason="end_turn",
204205
),
205206
exception=None,
@@ -246,6 +247,7 @@ async def test_agent_stream_async_hooks(agent, hook_provider, agent_tool, mock_m
246247
message={
247248
"content": [{"toolUse": tool_use}],
248249
"role": "assistant",
250+
"metadata": ANY,
249251
},
250252
stop_reason="tool_use",
251253
),
@@ -272,7 +274,7 @@ async def test_agent_stream_async_hooks(agent, hook_provider, agent_tool, mock_m
272274
agent=agent,
273275
invocation_state=ANY,
274276
stop_response=AfterModelCallEvent.ModelStopResponse(
275-
message=mock_model.agent_responses[1],
277+
message={"role": "assistant", "content": [{"text": "I invoked a tool!"}], "metadata": ANY},
276278
stop_reason="end_turn",
277279
),
278280
exception=None,

0 commit comments

Comments
 (0)