-
Notifications
You must be signed in to change notification settings - Fork 103
Expand file tree
/
Copy pathReasoningMessage.tsx
More file actions
238 lines (218 loc) · 8.77 KB
/
ReasoningMessage.tsx
File metadata and controls
238 lines (218 loc) · 8.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
import React, { useState, useEffect, useRef, useLayoutEffect } from "react";
import type { DisplayedMessage } from "@/common/types/message";
import { TypewriterMarkdown } from "./TypewriterMarkdown";
import { normalizeReasoningMarkdown } from "./MarkdownStyles";
import { cn } from "@/common/lib/utils";
import { Shimmer } from "../AIElements/Shimmer";
import { Lightbulb } from "lucide-react";
/**
 * Props for the ReasoningMessage component.
 */
interface ReasoningMessageProps {
  /** The reasoning part to render; narrowed so this component only receives `type: "reasoning"` messages. */
  message: DisplayedMessage & { type: "reasoning" };
  /** Optional extra classes merged onto the outer container via `cn`. */
  className?: string;
  /**
   * Workspace this reasoning belongs to. Forwarded to TypewriterMarkdown so the
   * smoothing engine can target the model's live emission rate. Optional —
   * tests render this component without a workspace context.
   */
  workspaceId?: string;
}

// Shared typography classes applied to BOTH the one-line summary header and the
// expanded trace body, keeping the two renderings visually consistent.
const REASONING_FONT_CLASSES = "font-primary text-[12px] leading-[18px]";
// Hard cap on the header summary line; longer first lines are truncated with "…".
const MAX_SUMMARY_CHARS = 240;
/**
 * Extracts a leading markdown-bold segment from a reasoning summary line.
 *
 * OpenAI reasoning summaries commonly start with markdown bold: "**Title**".
 * Only a leading pair is parsed so the header can stay a cheap plain-text
 * render while still showing the expected visual emphasis.
 *
 * @param summary - The (already truncated) first line of the reasoning trace.
 * @returns The trimmed bold text and the raw text after the closing marker,
 *   or null when the line does not begin with a non-empty bold pair.
 */
function parseLeadingBoldSummary(
  summary: string
): { boldText: string; trailingText: string } | null {
  // Bail out unless the line opens with the bold marker.
  if (summary.slice(0, 2) !== "**") {
    return null;
  }
  // Find the closing marker. -1 means unterminated; 2 means an empty "****" pair.
  const closeAt = summary.indexOf("**", 2);
  if (closeAt === -1 || closeAt === 2) {
    return null;
  }
  const heading = summary.slice(2, closeAt).trim();
  // Whitespace-only bold content is treated the same as no bold at all.
  return heading
    ? { boldText: heading, trailingText: summary.slice(closeAt + 2) }
    : null;
}
/**
 * Renders a model "reasoning" message part: a header row (lightbulb icon plus a
 * one-line summary or shimmer while streaming) and a collapsible body holding
 * the full reasoning trace. While streaming the body is left uncontrolled
 * (height: auto, no transition); once settled, expand/collapse animates via an
 * explicitly measured pixel height.
 */
export const ReasoningMessage: React.FC<ReasoningMessageProps> = ({
  message,
  className,
  workspaceId,
}) => {
  // Start expanded while streaming so the user can watch tokens arrive.
  const [isExpanded, setIsExpanded] = useState(message.isStreaming);
  // Track the height when expanded to reserve space during collapse transitions
  const [expandedHeight, setExpandedHeight] = useState<number | null>(null);
  const contentRef = useRef<HTMLDivElement>(null);

  const content = message.content;
  const isStreaming = message.isStreaming;
  const trimmedContent = content?.trim() ?? "";
  const hasContent = trimmedContent.length > 0;
  // The first line of the trace doubles as the collapsed header summary,
  // truncated to MAX_SUMMARY_CHARS with an ellipsis.
  const summaryLineRaw = hasContent ? (trimmedContent.split(/\r?\n/)[0] ?? "") : "";
  const summaryLine =
    summaryLineRaw.length > MAX_SUMMARY_CHARS
      ? `${summaryLineRaw.slice(0, MAX_SUMMARY_CHARS)}…`
      : summaryLineRaw;
  const parsedLeadingBoldSummary = parseLeadingBoldSummary(summaryLine);
  const hasAdditionalLines = hasContent && /[\r\n]/.test(trimmedContent);
  // OpenAI models often emit terse, single-line traces; surface them inline instead of hiding behind the label.
  const isSingleLineTrace = !isStreaming && hasContent && !hasAdditionalLines;
  const isCollapsible = !isStreaming && hasContent && hasAdditionalLines;
  const showEllipsis = isCollapsible && !isExpanded;
  const showExpandedContent = isExpanded && !isSingleLineTrace;

  // Capture expanded height before settled collapse/expand transitions. During live
  // streaming the container is height:auto and doesn't use this value, so skip the
  // synchronous scrollHeight read on every token delta.
  useLayoutEffect(() => {
    if (!isStreaming && contentRef.current && isExpanded && !isSingleLineTrace) {
      setExpandedHeight(contentRef.current.scrollHeight);
    }
  }, [isStreaming, isExpanded, isSingleLineTrace, content]);

  // Snapshot of the previous render's streaming flag, used to detect the
  // streaming→settled transition edge below.
  const wasStreamingRef = useRef(isStreaming);
  // `isLastPartOfMessage` is read defensively with `in` — not all message
  // shapes carry the flag.
  const isLastPartOfMessage =
    "isLastPartOfMessage" in message ? message.isLastPartOfMessage : false;
  // Auto-collapse only when reasoning reached *natural* completion — i.e. the
  // stream ended while this reasoning part was still the terminal block of the
  // message. When another part (text/tool) follows the reasoning, its
  // `isLastPartOfMessage` flips false in the same aggregator snapshot that turns
  // `isStreaming` off, which used to trigger a mid-turn 200ms height→0 animation
  // (a very visible vertical tear). Keeping the reasoning expanded in that case
  // lets the user continue reading it while the assistant moves on.
  useEffect(() => {
    const wasStreaming = wasStreamingRef.current;
    wasStreamingRef.current = isStreaming;
    if (wasStreaming && !isStreaming && isLastPartOfMessage) {
      setIsExpanded(false);
    }
  }, [isStreaming, isLastPartOfMessage]);

  // Header click handler; no-op unless the trace is settled and multi-line.
  const toggleExpanded = () => {
    if (!isCollapsible) {
      return;
    }
    setIsExpanded(!isExpanded);
  };

  // Render appropriate content based on state
  const renderContent = () => {
    // Empty streaming state
    if (isStreaming && !content) {
      return <div className="text-thinking-mode opacity-60">Thinking...</div>;
    }
    if (!content) {
      return null;
    }
    // Preserve single newlines so short section headers (e.g. "Fixing …") don't get
    // collapsed into the previous paragraph by the markdown renderer.
    //
    // Use TypewriterMarkdown for both streaming and settled reasoning so the component
    // identity is stable across stream completion — swapping to MarkdownRenderer at
    // stream end would unmount/remount the markdown subtree and visibly flash the
    // content. isComplete={!isStreaming} cleanly bypasses the smoothing engine once
    // the stream ends, matching the prior static-render behavior.
    // React Compiler auto-memoizes this normalize call between renders that
    // share the same `content` value; no manual useMemo needed.
    const normalizedContent = normalizeReasoningMarkdown(content);
    return (
      <TypewriterMarkdown
        content={normalizedContent}
        isComplete={!isStreaming}
        preserveLineBreaks
        streamKey={message.historyId}
        streamSource={message.streamPresentation?.source}
        workspaceId={workspaceId}
      />
    );
  };

  return (
    <div
      className={cn(
        "my-2 px-2 py-1 bg-[color-mix(in_srgb,var(--color-thinking-mode)_5%,transparent)] rounded relative",
        className
      )}
    >
      {/* Header row: icon + summary/shimmer + expand chevron.
          NOTE(review): the toggle is a click-only div — no role="button",
          tabIndex, or key handler, so it is not keyboard-accessible. Confirm
          whether that is intentional before changing the DOM. */}
      <div
        className={cn(
          "flex items-center justify-between gap-2 select-none",
          isCollapsible && "cursor-pointer",
          isExpanded && !isSingleLineTrace && "mb-1.5"
        )}
        onClick={isCollapsible ? toggleExpanded : undefined}
      >
        <div
          className={cn(
            "flex flex-1 items-center gap-1 min-w-0 text-xs opacity-80",
            "text-thinking-mode"
          )}
        >
          <span className="text-xs">
            <Lightbulb className={cn("size-3.5", isStreaming && "animate-pulse")} />
          </span>
          <div className="flex min-w-0 items-center gap-1 truncate">
            {isStreaming ? (
              <Shimmer colorClass="var(--color-thinking-mode)">Thinking...</Shimmer>
            ) : hasContent ? (
              <span className={cn("truncate whitespace-nowrap text-text", REASONING_FONT_CLASSES)}>
                {/* Render a leading "**Title**" as a real <strong> without paying
                    for a full markdown render in the header. */}
                {parsedLeadingBoldSummary ? (
                  <>
                    <strong>{parsedLeadingBoldSummary.boldText}</strong>
                    {parsedLeadingBoldSummary.trailingText}
                  </>
                ) : (
                  summaryLine
                )}
              </span>
            ) : (
              "Thought"
            )}
            {showEllipsis && (
              <span
                className="text-[11px] tracking-widest text-[color:var(--color-text)] opacity-70"
                data-testid="reasoning-ellipsis"
              >
                ...
              </span>
            )}
          </div>
        </div>
        {isCollapsible && (
          <span
            className={cn(
              "text-thinking-mode opacity-60 transition-transform duration-200 ease-in-out text-xs",
              isExpanded ? "rotate-90" : "rotate-0"
            )}
          >
            ▸
          </span>
        )}
      </div>
      {/* Always render the content container to prevent layout shifts.
          Use CSS transitions only for user-initiated collapse/expand of *settled*
          reasoning. During live streaming we leave the container uncontrolled
          (height: auto, no transition); otherwise each incoming delta re-targets
          scrollHeight through a 200ms height animation, which clips newly arrived
          tokens and produces a slow drip-in effect that reads as jitter. */}
      <div
        ref={contentRef}
        className={cn(
          REASONING_FONT_CLASSES,
          "italic opacity-85 [&_p]:mt-0 [&_p]:mb-1 [&_p:last-child]:mb-0",
          !isStreaming && "overflow-hidden transition-[height,opacity] duration-200 ease-in-out"
        )}
        style={
          isStreaming
            ? undefined
            : {
                // Fall back to "auto" on the first expanded paint, before the
                // layout effect has measured scrollHeight.
                height: showExpandedContent ? (expandedHeight ?? "auto") : 0,
                opacity: showExpandedContent ? 1 : 0,
              }
        }
        aria-hidden={!showExpandedContent}
      >
        {/* Skip rendering markdown entirely while collapsed (settled state). */}
        {isStreaming || showExpandedContent ? renderContent() : null}
      </div>
    </div>
  );
};