@@ -15,6 +15,10 @@ import { useTranslation } from "react-i18next";
1515import type { AgentModelConfig , SkillSummary , MessageContent , ContentBlock } from "@App/app/service/agent/types" ;
1616import { groupModelsByProvider , supportsVision , supportsImageOutput } from "./model_utils" ;
1717import ProviderIcon from "./ProviderIcon" ;
import { buildAnthropicRequest, parseAnthropicStream } from "@App/app/service/agent/providers/anthropic";
import { buildOpenAIRequest, parseOpenAIStream } from "@App/app/service/agent/providers/openai";
1822
1923// 斜杠命令弹出菜单
2024function SlashCommandMenu ( {
@@ -187,6 +191,7 @@ export default function ChatInput({
187191 const [ attachments , setAttachments ] = useState < PendingAttachment [ ] > ( [ ] ) ;
188192 const [ isDragging , setIsDragging ] = useState ( false ) ;
189193 const [ slashActiveIndex , setSlashActiveIndex ] = useState ( 0 ) ;
194+ const [ isOptimizing , setIsOptimizing ] = useState ( false ) ;
190195 const textareaRef = useRef < HTMLTextAreaElement > ( null ) ;
191196 const fileInputRef = useRef < HTMLInputElement > ( null ) ;
192197
@@ -364,6 +369,83 @@ export default function ChatInput({
364369 e . target . value = "" ;
365370 } ;
366371
372+ const handleOptimizePrompt = async ( ) => {
373+ const trimmed = input . trim ( ) ;
374+ if ( ! trimmed || isOptimizing ) return ;
375+
376+ // Find the currently selected model config
377+ const model = models . find ( ( m ) => m . id === selectedModelId ) || models [ 0 ] ;
378+ if ( ! model ) {
379+ ArcoMessage . error ( "No model available for optimization" ) ;
380+ return ;
381+ }
382+
383+ setIsOptimizing ( true ) ;
384+ const abortController = new AbortController ( ) ;
385+
386+ try {
387+ const chatRequest = {
388+ conversationId : "__prompt_optimizer__" ,
389+ modelId : model . id ,
390+ messages : [
391+ {
392+ role : "system" as const ,
393+ content :
394+ "You are a prompt engineering expert. Rewrite the user's raw input into a clear, structured, and actionable prompt that an AI agent can easily understand and execute. IMPORTANT: Always respond in the same language as the user's input — if they write in Chinese, output in Chinese; if Japanese, output in Japanese; if English, output in English. Preserve the original intent. Output ONLY the optimized prompt text — no explanations, no preamble, no markdown fences." ,
395+ } ,
396+ { role : "user" as const , content : trimmed } ,
397+ ] ,
398+ cache : false ,
399+ } ;
400+
401+ const { url, init } =
402+ model . provider === "anthropic"
403+ ? buildAnthropicRequest ( model , chatRequest )
404+ : buildOpenAIRequest (
405+ model . provider === "zhipu"
406+ ? { ...model , apiBaseUrl : model . apiBaseUrl || "https://open.bigmodel.cn/api/paas/v4" }
407+ : model ,
408+ chatRequest
409+ ) ;
410+
411+ const response = await fetch ( url , { ...init , signal : abortController . signal } ) ;
412+
413+ if ( ! response . ok ) {
414+ throw new Error ( `API error: ${ response . status } ` ) ;
415+ }
416+
417+ const reader = response . body ?. getReader ( ) ;
418+ if ( ! reader ) throw new Error ( "No response body" ) ;
419+
420+ let optimized = "" ;
421+ const parseStream = model . provider === "anthropic" ? parseAnthropicStream : parseOpenAIStream ;
422+
423+ await parseStream (
424+ reader ,
425+ ( event ) => {
426+ if ( event . type === "content_delta" && typeof event . delta === "string" ) {
427+ optimized += event . delta ;
428+ } else if ( event . type === "error" ) {
429+ throw new Error ( event . message ) ;
430+ }
431+ } ,
432+ abortController . signal
433+ ) ;
434+
435+ if ( optimized . trim ( ) ) {
436+ setInput ( optimized . trim ( ) ) ;
437+ textareaRef . current ?. focus ( ) ;
438+ ArcoMessage . success ( "Prompt optimized ✨" ) ;
439+ }
440+ } catch ( err : unknown ) {
441+ if ( err instanceof Error && err . name !== "AbortError" ) {
442+ ArcoMessage . error ( `Optimization failed: ${ err . message } ` ) ;
443+ }
444+ } finally {
445+ setIsOptimizing ( false ) ;
446+ }
447+ } ;
448+
367449 const canSend = ( input . trim ( ) || attachments . length > 0 ) && ! disabled && ! hasPendingMessage ;
368450
369451 return (
@@ -498,6 +580,45 @@ export default function ChatInput({
498580 < path d = "M21.44 11.05l-9.19 9.19a6 6 0 0 1-8.49-8.49l9.19-9.19a4 4 0 0 1 5.66 5.66l-9.2 9.19a2 2 0 0 1-2.83-2.83l8.49-8.48" />
499581 </ svg >
500582 </ button >
583+ { /* Prompt Optimizer */ }
584+ < Tooltip content = { isOptimizing ? "Optimizing..." : "Optimize prompt for Agent" } mini >
585+ < button
586+ onClick = { handleOptimizePrompt }
587+ className = { `tw-w-7 tw-h-7 tw-rounded tw-flex tw-items-center tw-justify-center tw-bg-transparent tw-border-none tw-cursor-pointer tw-transition-colors ${
588+ input . trim ( ) && ! isOptimizing
589+ ? "tw-text-[rgb(var(--arcoblue-6))] hover:tw-bg-[var(--color-fill-2)]"
590+ : "tw-text-[var(--color-text-4)] tw-opacity-40 tw-cursor-not-allowed"
591+ } `}
592+ >
593+ { isOptimizing ? (
594+ < svg
595+ width = "16"
596+ height = "16"
597+ viewBox = "0 0 24 24"
598+ fill = "none"
599+ stroke = "currentColor"
600+ strokeWidth = "2"
601+ strokeLinecap = "round"
602+ style = { { animation : "prompt-opt-spin 0.8s linear infinite" , transformOrigin : "center" } }
603+ >
604+ < path d = "M21 12a9 9 0 1 1-6.219-8.56" />
605+ </ svg >
606+ ) : (
607+ < svg
608+ width = "16"
609+ height = "16"
610+ viewBox = "0 0 24 24"
611+ fill = "none"
612+ stroke = "currentColor"
613+ strokeWidth = "2"
614+ strokeLinecap = "round"
615+ strokeLinejoin = "round"
616+ >
617+ < path d = "M9.663 17h4.673M12 3v1m6.364 1.636l-.707.707M21 12h-1M4 12H3m3.343-5.657l-.707-.707m2.828 9.9a5 5 0 1 1 7.072 0l-.548.547A3.374 3.374 0 0 0 14 18.469V19a2 2 0 1 1-4 0v-.531c0-.895-.356-1.754-.988-2.386l-.548-.547z" />
618+ </ svg >
619+ ) }
620+ </ button >
621+ </ Tooltip >
501622 { onEnableToolsChange && (
502623 < Tooltip
503624 content = {
0 commit comments