
Commit 7351340

Merge branch 'strands-agents:main' into message-requirements
2 parents dfd66b6 + 7226025, commit 7351340

10 files changed: 538 additions & 107 deletions


src/strands/event_loop/streaming.py

Lines changed: 9 additions & 0 deletions
@@ -10,6 +10,7 @@
     ModelStopReason,
     ModelStreamChunkEvent,
     ModelStreamEvent,
+    ReasoningRedactedContentStreamEvent,
     ReasoningSignatureStreamEvent,
     ReasoningTextStreamEvent,
     TextStreamEvent,
@@ -170,6 +171,10 @@ def handle_content_block_delta(
                 delta=delta_content,
             )

+        elif redacted_content := delta_content["reasoningContent"].get("redactedContent"):
+            state["redactedContent"] = state.get("redactedContent", b"") + redacted_content
+            typed_event = ReasoningRedactedContentStreamEvent(redacted_content=redacted_content, delta=delta_content)
+
     return state, typed_event
@@ -188,6 +193,7 @@ def handle_content_block_stop(state: dict[str, Any]) -> dict[str, Any]:
     text = state["text"]
     reasoning_text = state["reasoningText"]
     citations_content = state["citationsContent"]
+    redacted_content = state.get("redactedContent")

     if current_tool_use:
         if "input" not in current_tool_use:
@@ -231,6 +237,9 @@

         content.append(content_block)
         state["reasoningText"] = ""
+    elif redacted_content:
+        content.append({"reasoningContent": {"redactedContent": redacted_content}})
+        state["redactedContent"] = b""

     return state
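The new branch mirrors the accumulate-then-flush handling already used for reasoning text: redactedContent bytes arrive fragment by fragment in contentBlockDelta events, are concatenated into state, and are flushed into a single content block on contentBlockStop. A minimal standalone sketch of that pattern; the dict shapes mirror the diff, while the helper names are purely illustrative:

# Sketch of the accumulate-then-flush pattern added above. The event and
# state shapes mirror streaming.py; the helper names are illustrative.
from typing import Any

def accumulate_redacted(state: dict[str, Any], delta: dict[str, Any]) -> None:
    # Each delta may carry a bytes fragment of redacted reasoning content.
    if redacted := delta.get("reasoningContent", {}).get("redactedContent"):
        state["redactedContent"] = state.get("redactedContent", b"") + redacted

def flush_redacted(state: dict[str, Any], content: list[dict[str, Any]]) -> None:
    # On contentBlockStop, emit one combined block and reset the buffer.
    if redacted := state.get("redactedContent"):
        content.append({"reasoningContent": {"redactedContent": redacted}})
        state["redactedContent"] = b""

state: dict[str, Any] = {}
content: list[dict[str, Any]] = []
for chunk in (b"part-1", b"part-2"):
    accumulate_redacted(state, {"reasoningContent": {"redactedContent": chunk}})
flush_redacted(state, content)
assert content == [{"reasoningContent": {"redactedContent": b"part-1part-2"}}]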

src/strands/models/openai.py

Lines changed: 52 additions & 51 deletions
@@ -64,12 +64,10 @@ def __init__(self, client_args: Optional[dict[str, Any]] = None, **model_config:
         """
         validate_config_keys(model_config, self.OpenAIConfig)
         self.config = dict(model_config)
+        self.client_args = client_args or {}

         logger.debug("config=<%s> | initializing", self.config)

-        client_args = client_args or {}
-        self.client = openai.AsyncOpenAI(**client_args)
-
     @override
     def update_config(self, **model_config: Unpack[OpenAIConfig]) -> None:  # type: ignore[override]
         """Update the OpenAI model configuration with the provided arguments.
@@ -379,58 +377,60 @@ async def stream(
         logger.debug("formatted request=<%s>", request)

         logger.debug("invoking model")
-        response = await self.client.chat.completions.create(**request)
-
-        logger.debug("got response from model")
-        yield self.format_chunk({"chunk_type": "message_start"})
-        yield self.format_chunk({"chunk_type": "content_start", "data_type": "text"})
-
-        tool_calls: dict[int, list[Any]] = {}
-
-        async for event in response:
-            # Defensive: skip events with empty or missing choices
-            if not getattr(event, "choices", None):
-                continue
-            choice = event.choices[0]
-
-            if choice.delta.content:
-                yield self.format_chunk(
-                    {"chunk_type": "content_delta", "data_type": "text", "data": choice.delta.content}
-                )
-
-            if hasattr(choice.delta, "reasoning_content") and choice.delta.reasoning_content:
-                yield self.format_chunk(
-                    {
-                        "chunk_type": "content_delta",
-                        "data_type": "reasoning_content",
-                        "data": choice.delta.reasoning_content,
-                    }
-                )

-            for tool_call in choice.delta.tool_calls or []:
-                tool_calls.setdefault(tool_call.index, []).append(tool_call)
+        async with openai.AsyncOpenAI(**self.client_args) as client:
+            response = await client.chat.completions.create(**request)

-            if choice.finish_reason:
-                break
+            logger.debug("got response from model")
+            yield self.format_chunk({"chunk_type": "message_start"})
+            yield self.format_chunk({"chunk_type": "content_start", "data_type": "text"})

-        yield self.format_chunk({"chunk_type": "content_stop", "data_type": "text"})
+            tool_calls: dict[int, list[Any]] = {}

-        for tool_deltas in tool_calls.values():
-            yield self.format_chunk({"chunk_type": "content_start", "data_type": "tool", "data": tool_deltas[0]})
+            async for event in response:
+                # Defensive: skip events with empty or missing choices
+                if not getattr(event, "choices", None):
+                    continue
+                choice = event.choices[0]
+
+                if choice.delta.content:
+                    yield self.format_chunk(
+                        {"chunk_type": "content_delta", "data_type": "text", "data": choice.delta.content}
+                    )
+
+                if hasattr(choice.delta, "reasoning_content") and choice.delta.reasoning_content:
+                    yield self.format_chunk(
+                        {
+                            "chunk_type": "content_delta",
+                            "data_type": "reasoning_content",
+                            "data": choice.delta.reasoning_content,
+                        }
+                    )

-            for tool_delta in tool_deltas:
-                yield self.format_chunk({"chunk_type": "content_delta", "data_type": "tool", "data": tool_delta})
+                for tool_call in choice.delta.tool_calls or []:
+                    tool_calls.setdefault(tool_call.index, []).append(tool_call)

-            yield self.format_chunk({"chunk_type": "content_stop", "data_type": "tool"})
+                if choice.finish_reason:
+                    break

-            yield self.format_chunk({"chunk_type": "message_stop", "data": choice.finish_reason})
+            yield self.format_chunk({"chunk_type": "content_stop", "data_type": "text"})

-        # Skip remaining events as we don't have use for anything except the final usage payload
-        async for event in response:
-            _ = event
+            for tool_deltas in tool_calls.values():
+                yield self.format_chunk({"chunk_type": "content_start", "data_type": "tool", "data": tool_deltas[0]})

-            if event.usage:
-                yield self.format_chunk({"chunk_type": "metadata", "data": event.usage})
+                for tool_delta in tool_deltas:
+                    yield self.format_chunk({"chunk_type": "content_delta", "data_type": "tool", "data": tool_delta})
+
+            yield self.format_chunk({"chunk_type": "content_stop", "data_type": "tool"})
+
+            yield self.format_chunk({"chunk_type": "message_stop", "data": choice.finish_reason})
+
+            # Skip remaining events as we don't have use for anything except the final usage payload
+            async for event in response:
+                _ = event
+
+                if event.usage:
+                    yield self.format_chunk({"chunk_type": "metadata", "data": event.usage})

         logger.debug("finished streaming response from model")
@@ -449,11 +449,12 @@ async def structured_output(
         Yields:
             Model events with the last being the structured output.
         """
-        response: ParsedChatCompletion = await self.client.beta.chat.completions.parse(  # type: ignore
-            model=self.get_config()["model_id"],
-            messages=self.format_request(prompt, system_prompt=system_prompt)["messages"],
-            response_format=output_model,
-        )
+        async with openai.AsyncOpenAI(**self.client_args) as client:
+            response: ParsedChatCompletion = await client.beta.chat.completions.parse(
+                model=self.get_config()["model_id"],
+                messages=self.format_request(prompt, system_prompt=system_prompt)["messages"],
+                response_format=output_model,
+            )

         parsed: T | None = None
         # Find the first choice with tool_calls
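Both call sites now build the client per invocation instead of holding one AsyncOpenAI instance for the model's lifetime, and `async with` guarantees the client's HTTP resources are released even if the consumer stops iterating early. A minimal sketch of the pattern, assuming the openai package is installed and credentials come from client_args or the environment; the function name and request values are illustrative:

import openai

async def stream_chat(client_args: dict, request: dict):
    # A fresh client per call; closed deterministically when the block exits.
    async with openai.AsyncOpenAI(**client_args) as client:
        response = await client.chat.completions.create(**request)
        async for event in response:
            yield event

# Usage sketch (requires network access and a valid API key):
# async for event in stream_chat({}, {"model": "gpt-4o-mini", "stream": True,
#                                     "messages": [{"role": "user", "content": "hi"}]}):
#     print(event)

The trade-off: a fresh client means a fresh connection pool per request, giving up cross-call connection reuse in exchange for a client that is never shared across event loops.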

src/strands/telemetry/tracer.py

Lines changed: 27 additions & 3 deletions
@@ -207,6 +207,30 @@ def _add_event(self, span: Optional[Span], event_name: str, event_attributes: Di

         span.add_event(event_name, attributes=event_attributes)

+    def _get_event_name_for_message(self, message: Message) -> str:
+        """Determine the appropriate OpenTelemetry event name for a message.
+
+        According to OpenTelemetry semantic conventions v1.36.0, messages containing tool results
+        should be labeled as 'gen_ai.tool.message' regardless of their role field.
+        This ensures proper categorization of tool responses in traces.
+
+        Note: The GenAI namespace is experimental and may change in future versions.
+
+        Reference: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/gen-ai/gen-ai-events.md#event-gen_aitoolmessage
+
+        Args:
+            message: The message to determine the event name for
+
+        Returns:
+            The OpenTelemetry event name (e.g., 'gen_ai.user.message', 'gen_ai.tool.message')
+        """
+        # Check if the message contains a tool result
+        for content_block in message.get("content", []):
+            if "toolResult" in content_block:
+                return "gen_ai.tool.message"
+
+        return f"gen_ai.{message['role']}.message"
+
     def start_model_invoke_span(
         self,
         messages: Messages,
@@ -240,7 +264,7 @@ def start_model_invoke_span(
         for message in messages:
             self._add_event(
                 span,
-                f"gen_ai.{message['role']}.message",
+                self._get_event_name_for_message(message),
                 {"content": serialize(message["content"])},
             )
         return span
@@ -379,7 +403,7 @@ def start_event_loop_cycle_span(
         for message in messages or []:
             self._add_event(
                 span,
-                f"gen_ai.{message['role']}.message",
+                self._get_event_name_for_message(message),
                 {"content": serialize(message["content"])},
             )

@@ -456,7 +480,7 @@ def start_agent_span(
         for message in messages:
             self._add_event(
                 span,
-                f"gen_ai.{message['role']}.message",
+                self._get_event_name_for_message(message),
                 {"content": serialize(message["content"])},
            )
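The rule the new helper encodes is small enough to spot-check in isolation. A standalone restatement, with plain dicts standing in for the Message type and an illustrative toolResult payload:

# Standalone restatement of the rule in _get_event_name_for_message;
# plain dicts stand in for the Message type.
def event_name_for(message: dict) -> str:
    # Tool results map to gen_ai.tool.message regardless of the role field.
    for content_block in message.get("content", []):
        if "toolResult" in content_block:
            return "gen_ai.tool.message"
    return f"gen_ai.{message['role']}.message"

assert event_name_for({"role": "user", "content": [{"text": "hi"}]}) == "gen_ai.user.message"
assert event_name_for(
    {"role": "user", "content": [{"toolResult": {"toolUseId": "t-1", "content": []}}]}
) == "gen_ai.tool.message"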

src/strands/types/_events.py

Lines changed: 8 additions & 0 deletions
@@ -169,6 +169,14 @@ def __init__(self, delta: ContentBlockDelta, reasoning_text: str | None) -> None
         super().__init__({"reasoningText": reasoning_text, "delta": delta, "reasoning": True})


+class ReasoningRedactedContentStreamEvent(ModelStreamEvent):
+    """Event emitted during redacted content streaming."""
+
+    def __init__(self, delta: ContentBlockDelta, redacted_content: bytes | None) -> None:
+        """Initialize with delta and redacted content."""
+        super().__init__({"reasoningRedactedContent": redacted_content, "delta": delta, "reasoning": True})
+
+
 class ReasoningSignatureStreamEvent(ModelStreamEvent):
     """Event emitted during reasoning signature streaming."""
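For reference, the payload the new event wraps mirrors the other reasoning stream events. A small construction sketch; the call site in streaming.py uses keyword arguments, so the delta/redacted_content parameter order in the signature is immaterial, and the plain dict below stands in for ContentBlockDelta:

from strands.types._events import ReasoningRedactedContentStreamEvent

delta = {"reasoningContent": {"redactedContent": b"opaque-bytes"}}
event = ReasoningRedactedContentStreamEvent(redacted_content=b"opaque-bytes", delta=delta)
# Wrapped payload, per the __init__ above:
# {"reasoningRedactedContent": b"opaque-bytes", "delta": delta, "reasoning": True}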

tests/fixtures/mocked_model_provider.py

Lines changed: 4 additions & 0 deletions
@@ -72,6 +72,10 @@ def map_agent_message_to_events(self, agent_message: Union[Message, RedactionMes
             stop_reason = "guardrail_intervened"
         else:
             for content in agent_message["content"]:
+                if "reasoningContent" in content:
+                    yield {"contentBlockStart": {"start": {}}}
+                    yield {"contentBlockDelta": {"delta": {"reasoningContent": content["reasoningContent"]}}}
+                    yield {"contentBlockStop": {}}
                 if "text" in content:
                     yield {"contentBlockStart": {"start": {}}}
                     yield {"contentBlockDelta": {"delta": {"text": content["text"]}}}
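With this change the mock maps a reasoningContent block to the same start/delta/stop triplet it already emits for text, so a redacted-reasoning block yields, in order (a sketch, with b"data" standing in for the block's payload):

{"contentBlockStart": {"start": {}}}
{"contentBlockDelta": {"delta": {"reasoningContent": {"redactedContent": b"data"}}}}
{"contentBlockStop": {}}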

tests/strands/agent/hooks/test_agent_events.py

Lines changed: 78 additions & 0 deletions
@@ -387,6 +387,84 @@ async def test_stream_e2e_throttle_and_redact(alist, mock_sleep):
     assert typed_events == []


+@pytest.mark.asyncio
+async def test_stream_e2e_reasoning_redacted_content(alist):
+    mock_provider = MockedModelProvider(
+        [
+            {
+                "role": "assistant",
+                "content": [
+                    {"reasoningContent": {"redactedContent": b"test_redacted_data"}},
+                    {"text": "Response with redacted reasoning"},
+                ],
+            },
+        ]
+    )
+
+    mock_callback = unittest.mock.Mock()
+    agent = Agent(model=mock_provider, callback_handler=mock_callback)
+
+    stream = agent.stream_async("Test redacted content")
+
+    tru_events = await alist(stream)
+    exp_events = [
+        {"init_event_loop": True},
+        {"start": True},
+        {"start_event_loop": True},
+        {"event": {"messageStart": {"role": "assistant"}}},
+        {"event": {"contentBlockStart": {"start": {}}}},
+        {"event": {"contentBlockDelta": {"delta": {"reasoningContent": {"redactedContent": b"test_redacted_data"}}}}},
+        {
+            **any_props,
+            "reasoningRedactedContent": b"test_redacted_data",
+            "delta": {"reasoningContent": {"redactedContent": b"test_redacted_data"}},
+            "reasoning": True,
+        },
+        {"event": {"contentBlockStop": {}}},
+        {"event": {"contentBlockStart": {"start": {}}}},
+        {"event": {"contentBlockDelta": {"delta": {"text": "Response with redacted reasoning"}}}},
+        {
+            **any_props,
+            "data": "Response with redacted reasoning",
+            "delta": {"text": "Response with redacted reasoning"},
+        },
+        {"event": {"contentBlockStop": {}}},
+        {"event": {"messageStop": {"stopReason": "end_turn"}}},
+        {
+            "message": {
+                "content": [
+                    {"reasoningContent": {"redactedContent": b"test_redacted_data"}},
+                    {"text": "Response with redacted reasoning"},
+                ],
+                "role": "assistant",
+            }
+        },
+        {
+            "result": AgentResult(
+                stop_reason="end_turn",
+                message={
+                    "content": [
+                        {"reasoningContent": {"redactedContent": b"test_redacted_data"}},
+                        {"text": "Response with redacted reasoning"},
+                    ],
+                    "role": "assistant",
+                },
+                metrics=ANY,
+                state={},
+            )
+        },
+    ]
+    assert tru_events == exp_events
+
+    exp_calls = [call(**event) for event in exp_events]
+    act_calls = mock_callback.call_args_list
+    assert act_calls == exp_calls
+
+    # Ensure that all events coming out of the agent are *not* typed events
+    typed_events = [event for event in tru_events if isinstance(event, TypedEvent)]
+    assert typed_events == []
+
+
 @pytest.mark.asyncio
 async def test_event_loop_cycle_text_response_throttling_early_end(
     agenerator,
