Skip to content

Commit 233d006

Browse files
Copilot and pelikhan authored
Fix quiet mode to suppress runPrompt console output (#1857)
* Initial plan

* Fix quiet mode for runPrompt console output

Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>

* Add comprehensive test for quiet mode console output

Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>

* Add documentation and demonstration for quiet mode fix

Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>

* Revert files as requested by @pelikhan

Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>

* Add `--issue` flag to CLI commands for GitHub issue creation and update output handling in runScriptInternal

* Refactor poem output handling to display poem length instead of content

* Refactor import statements to use type imports for better clarity and performance

* Enhance runScriptInternal function to improve output handling and formatting

* Refactor output handling in createChatTurnGenerationContext and poem-inline.genai.mts for improved clarity

---------

Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>
Co-authored-by: Peli de Halleux <pelikhan@users.noreply.github.com>
1 parent 8acdcf7 commit 233d006

5 files changed

Lines changed: 16 additions & 9 deletions

File tree

packages/api/src/run.ts

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -416,10 +416,12 @@ export async function runScriptInternal(
416416
}
417417
if (responseChunk !== undefined && responseChunk !== null && responseChunk !== "") {
418418
if (reasoningOutput) {
419-
stderr.write(reasoningEndMarker);
419+
if (!isQuiet) {
420+
stderr.write(reasoningEndMarker);
421+
}
420422
reasoningOutput = false;
421423
}
422-
if (stream) {
424+
if (stream && !(inner && isQuiet)) {
423425
if (responseTokens && consoleColors) {
424426
const colors = inner ? CONSOLE_TOKEN_INNER_COLORS : CONSOLE_TOKEN_COLORS;
425427
for (const token of responseTokens) {
@@ -435,7 +437,7 @@ export async function runScriptInternal(
435437
} else {
436438
if (!inner) stdout.write(responseChunk);
437439
else {
438-
stderr.write(wrapColor(CONSOLE_COLOR_DEBUG, responseChunk));
440+
if (!isQuiet) stderr.write(wrapColor(CONSOLE_COLOR_DEBUG, responseChunk));
439441
}
440442
}
441443
} else if (!isQuiet) {
@@ -752,10 +754,10 @@ export async function runScriptInternal(
752754
}))
753755
) {
754756
// Generate title (simple approach for now, can be enhanced with small model later)
755-
const firstLine = result.text.split('\n')[0]?.trim();
756-
const shortContent = firstLine ? firstLine.substring(0, 60) : '';
757+
const firstLine = result.text.split("\n")[0]?.trim();
758+
const shortContent = firstLine ? firstLine.substring(0, 60) : "";
757759
let title = shortContent ? `${script.id}: ${shortContent}` : `Generated by ${script.id}`;
758-
760+
759761
// TODO: Enhance with small model title generation
760762
// This could be implemented by creating a temporary script and running it with the small model,
761763
// but for now we use a simpler approach to avoid complexity.

packages/cli/src/openaiapi.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
// Copyright (c) Microsoft Corporation.
22
// Licensed under the MIT License.
33

4-
import { IncomingMessage, ServerResponse } from "http";
4+
import type { IncomingMessage, ServerResponse } from "http";
55
import type {
66
CancellationOptions,
77
ChatCompletion,

packages/core/src/openai.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ const dbgMessages = dbg.extend("msg");
4545
dbgMessages.enabled = false;
4646

4747
export const OpenAIChatCompletion: ChatCompletionHandler = async (req, cfg, options, trace) => {
48-
//const { provider } = parseModelIdentifier(req.model);
48+
// const { provider } = parseModelIdentifier(req.model);
4949
// const features = providerFeatures(provider);
5050
const useResponsesApi = cfg.type === "responses";
5151
if (useResponsesApi) return OpenAIv2ResponsesChatCompletion(req, cfg, options, trace);

packages/core/src/runpromptcontext.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,7 @@ import { toBase64 } from "./base64.js";
9494
import { consoleColors } from "./consolecolor.js";
9595
import { terminalSize } from "./terminal.js";
9696
import { stderr, stdout } from "./stdio.js";
97+
import { isQuiet } from "./quiet.js";
9798
import { dotGenaiscriptPath } from "./workdir.js";
9899
import { prettyBytes } from "./pretty.js";
99100
import { createCache } from "./cache.js";
@@ -1098,7 +1099,7 @@ export function createChatGenerationContext(
10981099
const filename = dotGenaiscriptPath("image", h + "." + ext);
10991100
await runtimeHost.writeFile(filename, buf);
11001101

1101-
if (consoleColors) {
1102+
if (consoleColors && !isQuiet) {
11021103
const size = terminalSize();
11031104
stderr.write(
11041105
await renderImageToTerminal(buf, {
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
const { output } = env;
2+
const res = await prompt`Write a short poem in code.`;
3+
output.item(`llm poem: ${res.text.length}`)
4+
output.fence(res.text)

0 commit comments

Comments (0)