We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent c8afa6f · commit 1465ff0 (Copy full SHA for 1465ff0)
1 file changed
src/provider.ts
@@ -21,7 +21,9 @@ const DEFAULT_MAX_OUTPUT_TOKENS = 16000;
 // Token estimates for gpt‑oss are correct as we use the appropriate tokenizer.
 // For Qwen we must first create the tokenizer from the model, as it does not use tiktoken.
 // As a workaround, we also use the gpt‑oss tokenizer for now and reduce the max context length here.
-const DEFAULT_CONTEXT_LENGTH = 120000;
+//
+// Further reduced to avoid running into rate limits for free users.
+const DEFAULT_CONTEXT_LENGTH = 96000;
 
 /**
  * VS Code Chat provider backed by Privatemode OpenAI API.
0 commit comments