
Commit ef21586

anandgupta42 and claude committed
fix: address remaining review findings — history, debounce, tests
- Fix history storing original text instead of enhanced text by passing `inputText` explicitly to `history.append()` instead of spreading `store.prompt`, which may contain stale state
- Add concurrency guard (`enhancingInProgress` flag) to prevent multiple concurrent auto-enhance LLM calls from rapid submissions
- Consolidate magic string into `ENHANCE_NAME` constant used across agent name, user agent, log service, and ID derivation
- Add justifying comment for `as any` cast on synthetic IDs explaining why branded types are safely bypassed
- Add `isAutoEnhanceEnabled()` tests (5 cases): config absent, present but missing flag, false, true, undefined
- Add `enhancePrompt()` tests (10 cases): empty input, whitespace, successful enhancement, think tag stripping, code fence stripping, stream.text failure, stream init failure, empty LLM response, think tags with no content, combined pipeline

Test count: 32 -> 48

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
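The history fix is the subtle one: by the time `history.append()` runs, the input widget may already hold the enhanced text while `store.prompt` still holds the pre-enhancement snapshot, so spreading it recorded stale text. A minimal sketch of the failure mode, with names simplified (`PromptState`, the literals, and the `push` calls are illustrative stand-ins for the real TUI state, not opencode's actual API):

```ts
// Simplified model of the bug: a snapshot object spread into the history
// entry keeps whatever `input` held when the snapshot was taken.
type PromptState = { input: string; mode: string }

const history: PromptState[] = []
const storePrompt: PromptState = { input: "fix the test", mode: "normal" }

const inputText = "Investigate the failing test and fix it." // enhanced text

// Before: the stale snapshot text is recorded
history.push({ ...storePrompt, mode: "normal" })

// After: the enhanced text wins, because an explicit field listed after
// the spread overrides the spread value
history.push({ ...storePrompt, input: inputText, mode: "normal" })
```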
1 parent 2765196 commit ef21586

3 files changed: 172 additions & 8 deletions

packages/opencode/src/altimate/enhance-prompt.ts

Lines changed: 8 additions & 6 deletions
```diff
@@ -6,11 +6,13 @@ import { Config } from "@/config/config"
 import { Log } from "@/util/log"
 import { MessageV2 } from "@/session/message-v2"
 
-const log = Log.create({ service: "enhance-prompt" })
-
+const ENHANCE_NAME = "enhance-prompt"
 const ENHANCE_TIMEOUT_MS = 15_000
-// Synthetic ID for enhancement requests — not a real session/message
-const ENHANCE_ID = "enhance-prompt" as any
+// MessageV2.User requires branded MessageID/SessionID types, but this is a
+// synthetic message that never enters the session store — cast is safe here.
+const ENHANCE_ID = ENHANCE_NAME as any
+
+const log = Log.create({ service: ENHANCE_NAME })
 
 // Research-backed enhancement prompt based on:
 // - AutoPrompter (arxiv 2504.20196): 5 missing info categories that cause 27% lower edit correctness
@@ -93,7 +95,7 @@ export async function enhancePrompt(text: string): Promise<string> {
     (await Provider.getModel(defaultModel.providerID, defaultModel.modelID))
 
   const agent: Agent.Info = {
-    name: "enhance-prompt",
+    name: ENHANCE_NAME,
     mode: "primary",
     hidden: true,
     options: {},
@@ -107,7 +109,7 @@ export async function enhancePrompt(text: string): Promise<string> {
     sessionID: ENHANCE_ID,
     role: "user",
     time: { created: Date.now() },
-    agent: "enhance-prompt",
+    agent: ENHANCE_NAME,
     model: {
       providerID: model.providerID,
       modelID: model.id,
```
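For context on the new comment above the cast: branded string types reject plain string literals at compile time, which is exactly why a synthetic ID needs an explicit escape hatch. A minimal illustration of the pattern (the brand encoding and `load` function here are illustrative, not opencode's actual definitions):

```ts
// A branded (nominal) string type: structurally a string, but plain
// strings are not assignable to it without an assertion.
type SessionID = string & { readonly __brand: "SessionID" }

function load(id: SessionID): void {
  // ...
}

// load("enhance-prompt")            // compile error: string is not SessionID
load("enhance-prompt" as SessionID)  // explicit opt-out, like `as any` above
```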

packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx

Lines changed: 7 additions & 1 deletion
```diff
@@ -36,6 +36,7 @@ import { useTextareaKeybindings } from "../textarea-keybindings"
 import { DialogSkill } from "../dialog-skill"
 // altimate_change start - import prompt enhancement
 import { enhancePrompt, isAutoEnhanceEnabled } from "@/altimate/enhance-prompt"
+let enhancingInProgress = false
 // altimate_change end
 
 export type PromptProps = {
@@ -613,10 +614,12 @@ export function Prompt(props: PromptProps) {
 
   // altimate_change start - auto-enhance prompt before expanding paste text
   // Only enhance the raw user text, not shell commands or slash commands
-  if (store.mode === "normal" && !inputText.startsWith("/")) {
+  // Guard prevents concurrent enhancement calls from rapid submissions
+  if (store.mode === "normal" && !inputText.startsWith("/") && !enhancingInProgress) {
     try {
       const autoEnhance = await isAutoEnhanceEnabled()
       if (autoEnhance) {
+        enhancingInProgress = true
         toast.show({ message: "Enhancing prompt...", variant: "info", duration: 2000 })
         const enhanced = await enhancePrompt(inputText)
         if (enhanced !== inputText) {
@@ -627,6 +630,8 @@ export function Prompt(props: PromptProps) {
     } catch (err) {
       // Enhancement failure should never block prompt submission
       console.error("auto-enhance failed, using original prompt", err)
+    } finally {
+      enhancingInProgress = false
     }
   }
   // altimate_change end
@@ -720,6 +725,7 @@ export function Prompt(props: PromptProps) {
   }
   history.append({
     ...store.prompt,
+    input: inputText,
     mode: currentMode,
   })
   input.extmarks.clear()
```
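One design note on `enhancingInProgress`: because the TUI runs on JavaScript's single-threaded event loop, a plain module-level boolean checked before the first `await` is enough to reject overlapping submissions, with no atomics needed, and the `finally` block guarantees release even when `enhancePrompt()` throws. A stripped-down sketch of the same pattern:

```ts
// Minimal re-entrancy guard, mirroring the diff above.
let busy = false

async function runOnce(task: () => Promise<void>): Promise<boolean> {
  if (busy) return false // a second rapid call bails out here
  busy = true
  try {
    await task()
    return true
  } finally {
    busy = false // always released, even if task() rejects
  }
}
```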

packages/opencode/test/altimate/enhance-prompt.test.ts

Lines changed: 157 additions & 1 deletion
```diff
@@ -1,6 +1,62 @@
-import { describe, expect, test } from "bun:test"
+import { describe, expect, test, mock, beforeEach } from "bun:test"
 import { clean, stripThinkTags } from "../../src/altimate/enhance-prompt"
 
+// Mock Config for isAutoEnhanceEnabled tests
+let mockConfig: any = {}
+mock.module("@/config/config", () => ({
+  Config: {
+    get: () => Promise.resolve(mockConfig),
+  },
+}))
+
+// Mock Provider and LLM for enhancePrompt tests
+let mockStreamResult: string | undefined = "enhanced result"
+let mockStreamShouldThrow = false
+mock.module("@/provider/provider", () => ({
+  Provider: {
+    defaultModel: () =>
+      Promise.resolve({ providerID: "test-provider", modelID: "test-model" }),
+    getSmallModel: () =>
+      Promise.resolve({ providerID: "test-provider", id: "test-small", modelID: "test-small" }),
+    getModel: () =>
+      Promise.resolve({ providerID: "test-provider", id: "test-model", modelID: "test-model" }),
+  },
+}))
+
+mock.module("@/session/llm", () => ({
+  LLM: {
+    stream: () => {
+      if (mockStreamShouldThrow) return Promise.reject(new Error("stream init failed"))
+      return Promise.resolve({
+        text:
+          mockStreamResult !== undefined
+            ? Promise.resolve(mockStreamResult)
+            : Promise.reject(new Error("stream text failed")),
+      })
+    },
+  },
+}))
+
+mock.module("@/util/log", () => ({
+  Log: {
+    create: () => ({
+      info: () => {},
+      error: () => {},
+      debug: () => {},
+    }),
+  },
+}))
+
+mock.module("@/agent/agent", () => ({
+  Agent: {},
+}))
+
+mock.module("@/session/message-v2", () => ({
+  MessageV2: {},
+}))
+
+// Import after mocking
+const { enhancePrompt, isAutoEnhanceEnabled } = await import("../../src/altimate/enhance-prompt")
+
 describe("enhance-prompt clean()", () => {
   test("strips markdown code fences", () => {
     expect(clean("```\nfixed prompt\n```")).toBe("fixed prompt")
@@ -149,3 +205,103 @@ describe("enhance-prompt combined pipeline", () => {
     expect(result).toBe("Add a created_at timestamp column to the users dbt model.")
   })
 })
+
+describe("isAutoEnhanceEnabled()", () => {
+  beforeEach(() => {
+    mockConfig = {}
+  })
+
+  test("returns false when experimental config is absent", async () => {
+    mockConfig = {}
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+
+  test("returns false when experimental exists but auto_enhance_prompt is missing", async () => {
+    mockConfig = { experimental: {} }
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+
+  test("returns false when auto_enhance_prompt is false", async () => {
+    mockConfig = { experimental: { auto_enhance_prompt: false } }
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+
+  test("returns true when auto_enhance_prompt is true", async () => {
+    mockConfig = { experimental: { auto_enhance_prompt: true } }
+    expect(await isAutoEnhanceEnabled()).toBe(true)
+  })
+
+  test("returns false when auto_enhance_prompt is undefined", async () => {
+    mockConfig = { experimental: { auto_enhance_prompt: undefined } }
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+})
+
+describe("enhancePrompt()", () => {
+  beforeEach(() => {
+    mockStreamResult = "enhanced result"
+    mockStreamShouldThrow = false
+  })
+
+  test("returns original text for empty input", async () => {
+    expect(await enhancePrompt("")).toBe("")
+  })
+
+  test("returns original text for whitespace-only input", async () => {
+    expect(await enhancePrompt(" ")).toBe(" ")
+  })
+
+  test("returns enhanced text from LLM", async () => {
+    mockStreamResult = "Investigate the failing test and fix it."
+    const result = await enhancePrompt("fix the test")
+    expect(result).toBe("Investigate the failing test and fix it.")
+  })
+
+  test("strips think tags from LLM response", async () => {
+    mockStreamResult = "<think>let me reason</think>Enhanced prompt here"
+    const result = await enhancePrompt("do something")
+    expect(result).toBe("Enhanced prompt here")
+  })
+
+  test("strips code fences from LLM response", async () => {
+    mockStreamResult = '```\nEnhanced prompt here\n```'
+    const result = await enhancePrompt("do something")
+    expect(result).toBe("Enhanced prompt here")
+  })
+
+  test("returns original text when LLM stream.text fails", async () => {
+    mockStreamResult = undefined // causes stream.text to reject
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("returns original text when LLM stream init fails", async () => {
+    mockStreamShouldThrow = true
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("returns original text when LLM returns empty string", async () => {
+    mockStreamResult = ""
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles LLM response with only think tags (no content)", async () => {
+    mockStreamResult = "<think>I should enhance this</think>"
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles unclosed think tag in LLM response", async () => {
+    mockStreamResult = "<think>reasoning cut off by token limit"
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles combined think tags + code fences + quotes", async () => {
+    mockStreamResult = '<think>reasoning</think>```\n"Investigate the failing test."\n```'
+    const result = await enhancePrompt("fix test")
+    expect(result).toBe("Investigate the failing test.")
+  })
+})
```
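The `isAutoEnhanceEnabled()` source itself is not part of this commit, only its tests. Judging by the five config cases above, the implementation presumably reduces to a strict equality check on the experimental flag. A minimal sketch consistent with those tests (inferred, not the actual source; it assumes `Config.get()` resolves the user config and the flag lives at `experimental.auto_enhance_prompt`):

```ts
import { Config } from "@/config/config"

// Inferred from the tests: only an explicit `true` enables the feature;
// absent config, missing flag, `false`, and `undefined` all return false.
export async function isAutoEnhanceEnabled(): Promise<boolean> {
  const config = await Config.get()
  return config.experimental?.auto_enhance_prompt === true
}
```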
