Skip to content

Commit b9474d5

Browse files
committed
feat(ui): add live status updates during agent execution
Add real-time status messages to the TUI showing what each agent is doing at any given moment.

Status messages shown:
- "Compressing memory..." during conversation history preparation
- "Waiting for LLM provider..." during API call setup
- "Generating response..." after first chunk received
- "Executing {tool1}, {tool2} +N more..." during tool execution
- "Setting up sandbox environment..." during sandbox init

Also renders thinking blocks in chat history from metadata and fixes indented thought display for multi-line thoughts in ThinkRenderer.
1 parent c9d2477 commit b9474d5

5 files changed

Lines changed: 84 additions & 8 deletions

File tree

strix/agents/base_agent.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -333,6 +333,13 @@ async def _initialize_sandbox_and_state(self, task: str) -> None:
333333
sandbox_mode = os.getenv("STRIX_SANDBOX_MODE", "false").lower() == "true"
334334
if not sandbox_mode and self.state.sandbox_id is None:
335335
from strix.runtime import get_runtime
336+
from strix.telemetry.tracer import get_global_tracer
337+
338+
tracer = get_global_tracer()
339+
if tracer:
340+
tracer.update_agent_system_message(
341+
self.state.agent_id, "Setting up sandbox environment..."
342+
)
336343

337344
try:
338345
runtime = get_runtime()
@@ -367,6 +374,9 @@ async def _initialize_sandbox_and_state(self, task: str) -> None:
367374
async def _process_iteration(self, tracer: Optional["Tracer"]) -> bool | None:
368375
final_response = None
369376

377+
if tracer:
378+
tracer.update_agent_system_message(self.state.agent_id, "Thinking...")
379+
370380
async for response in self.llm.generate(self.state.get_conversation_history()):
371381
final_response = response
372382
if tracer and response.content:
@@ -408,8 +418,19 @@ async def _process_iteration(self, tracer: Optional["Tracer"]) -> bool | None:
408418
)
409419

410420
if actions:
421+
if tracer:
422+
tool_names = [a.get("toolName") or a.get("tool_name") or "tool" for a in actions]
423+
display_names = tool_names[:2]
424+
overflow = len(tool_names) - 2
425+
suffix = f" +{overflow} more" if overflow > 0 else ""
426+
tracer.update_agent_system_message(
427+
self.state.agent_id, f"Executing {', '.join(display_names)}{suffix}..."
428+
)
411429
return await self._execute_actions(actions, tracer)
412430

431+
if tracer:
432+
tracer.update_agent_system_message(self.state.agent_id, "Processing response...")
433+
413434
return None
414435

415436
async def _execute_actions(self, actions: list[Any], tracer: Optional["Tracer"]) -> bool:

strix/interface/tool_components/thinking_renderer.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,8 @@ def render(cls, tool_data: dict[str, Any]) -> Static:
2323
text.append("\n ")
2424

2525
if thought:
26-
text.append(thought, style="italic dim")
26+
indented_thought = "\n ".join(thought.split("\n"))
27+
text.append(indented_thought, style="italic dim")
2728
else:
2829
text.append("Thinking...", style="italic dim")
2930

strix/interface/tui.py

Lines changed: 35 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1238,14 +1238,19 @@ def keymap_styled(keys: list[tuple[str, str]]) -> Text:
12381238
return (Text(" "), keymap, False)
12391239

12401240
if status == "running":
1241+
sys_msg = agent_data.get("system_message", "")
12411242
if self._agent_has_real_activity(agent_id):
12421243
animated_text = Text()
12431244
animated_text.append_text(self._get_sweep_animation(self._sweep_colors))
1245+
if sys_msg:
1246+
animated_text.append(sys_msg, style="dim italic")
1247+
animated_text.append(" ", style="dim")
12441248
animated_text.append("esc", style="white")
12451249
animated_text.append(" ", style="dim")
12461250
animated_text.append("stop", style="dim")
12471251
return (animated_text, keymap_styled([("ctrl-q", "quit")]), True)
1248-
animated_text = self._get_animated_verb_text(agent_id, "Initializing")
1252+
msg = sys_msg or "Initializing..."
1253+
animated_text = self._get_animated_verb_text(agent_id, msg)
12491254
return (animated_text, keymap_styled([("ctrl-q", "quit")]), True)
12501255

12511256
return (None, Text(), False)
@@ -1678,12 +1683,26 @@ def _render_chat_content(self, msg_data: dict[str, Any]) -> Any:
16781683
content = msg_data.get("content", "")
16791684
metadata = msg_data.get("metadata", {})
16801685

1681-
if not content:
1682-
return None
1683-
16841686
if role == "user":
1687+
if not content:
1688+
return None
16851689
return UserMessageRenderer.render_simple(content)
16861690

1691+
renderables = []
1692+
1693+
if "thinking_blocks" in metadata and metadata["thinking_blocks"]:
1694+
from strix.interface.tool_components.thinking_renderer import ThinkRenderer
1695+
1696+
for block in metadata["thinking_blocks"]:
1697+
thought = block.get("thinking", "")
1698+
if thought:
1699+
renderables.append(
1700+
ThinkRenderer.render({"args": {"thought": thought}}).renderable
1701+
)
1702+
1703+
if not content and not renderables:
1704+
return None
1705+
16871706
if metadata.get("interrupted"):
16881707
streaming_result = self._render_streaming_content(content)
16891708
interrupted_text = Text()
@@ -1692,7 +1711,18 @@ def _render_chat_content(self, msg_data: dict[str, Any]) -> Any:
16921711
interrupted_text.append("Interrupted by user", style="yellow dim")
16931712
return self._merge_renderables([streaming_result, interrupted_text])
16941713

1695-
return AgentMessageRenderer.render_simple(content)
1714+
if content:
1715+
msg_renderable = AgentMessageRenderer.render_simple(content)
1716+
renderables.append(msg_renderable)
1717+
1718+
if not renderables:
1719+
return None
1720+
1721+
if len(renderables) == 1:
1722+
r = renderables[0]
1723+
return self._sanitize_text(r) if isinstance(r, Text) else r
1724+
1725+
return self._merge_renderables(renderables)
16961726

16971727
def _render_tool_content_simple(self, tool_data: dict[str, Any]) -> Any:
16981728
tool_name = tool_data.get("tool_name", "Unknown Tool")

strix/llm/llm.py

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -141,12 +141,21 @@ def set_agent_identity(self, agent_name: str | None, agent_id: str | None) -> No
141141
async def generate(
142142
self, conversation_history: list[dict[str, Any]]
143143
) -> AsyncIterator[LLMResponse]:
144+
from strix.telemetry.tracer import get_global_tracer
145+
146+
tracer = get_global_tracer()
147+
if tracer and self.agent_id:
148+
tracer.update_agent_system_message(self.agent_id, "Compressing memory...")
149+
144150
messages = self._prepare_messages(conversation_history)
145151
max_retries = int(Config.get("strix_llm_max_retries") or "5")
146152

147153
for attempt in range(max_retries + 1):
148154
try:
149-
async for response in self._stream(messages):
155+
if tracer and self.agent_id:
156+
tracer.update_agent_system_message(self.agent_id, "Waiting for LLM provider...")
157+
158+
async for response in self._stream(messages, tracer):
150159
yield response
151160
return # noqa: TRY300
152161
except Exception as e: # noqa: BLE001
@@ -155,15 +164,23 @@ async def generate(
155164
wait = min(10, 2 * (2**attempt))
156165
await asyncio.sleep(wait)
157166

158-
async def _stream(self, messages: list[dict[str, Any]]) -> AsyncIterator[LLMResponse]:
167+
async def _stream(
168+
self, messages: list[dict[str, Any]], tracer: Any = None
169+
) -> AsyncIterator[LLMResponse]:
159170
accumulated = ""
160171
chunks: list[Any] = []
161172
done_streaming = 0
173+
first_chunk_received = False
162174

163175
self._total_stats.requests += 1
164176
response = await acompletion(**self._build_completion_args(messages), stream=True)
165177

166178
async for chunk in response:
179+
if not first_chunk_received:
180+
first_chunk_received = True
181+
if tracer and self.agent_id:
182+
tracer.update_agent_system_message(self.agent_id, "Generating response...")
183+
167184
chunks.append(chunk)
168185
if done_streaming:
169186
done_streaming += 1

strix/telemetry/tracer.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
_OTEL_BOOTSTRAPPED = False
3737
_OTEL_REMOTE_ENABLED = False
3838

39+
3940
def get_global_tracer() -> Optional["Tracer"]:
4041
return _global_tracer
4142

@@ -437,6 +438,7 @@ def log_agent_creation(
437438
"name": name,
438439
"task": task,
439440
"status": "running",
441+
"system_message": "",
440442
"parent_id": parent_id,
441443
"created_at": datetime.now(UTC).isoformat(),
442444
"updated_at": datetime.now(UTC).isoformat(),
@@ -585,6 +587,11 @@ def update_agent_status(
585587
source="strix.agents",
586588
)
587589

590+
def update_agent_system_message(self, agent_id: str, message: str) -> None:
591+
if agent_id in self.agents:
592+
self.agents[agent_id]["system_message"] = message
593+
self.agents[agent_id]["updated_at"] = datetime.now(UTC).isoformat()
594+
588595
def set_scan_config(self, config: dict[str, Any]) -> None:
589596
self.scan_config = config
590597
self.run_metadata.update(

0 commit comments

Comments (0)