Skip to content

Commit 47d0781

Browse files
Remove max_tokens implementation detail from doc comments
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent d0d283c commit 47d0781

4 files changed

Lines changed: 3 additions & 7 deletions

File tree

dotnet/src/Types.cs

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1118,8 +1118,7 @@ public class ProviderConfig
11181118

11191119
/// <summary>
11201120
/// Maximum number of tokens the model can generate in a single response.
1121-
/// Sent as <c>max_tokens</c> per LLM API request. When hit, the model stops
1122-
/// generating and returns a truncated response.
1121+
/// When hit, the model stops generating and returns a truncated response.
11231122
/// </summary>
11241123
[JsonPropertyName("maxOutputTokens")]
11251124
public int? MaxOutputTokens { get; set; }

go/types.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -602,8 +602,7 @@ type ProviderConfig struct {
602602
// Azure contains Azure-specific options
603603
Azure *AzureProviderOptions `json:"azure,omitempty"`
604604
// MaxOutputTokens is the maximum number of tokens the model can generate in a single response.
605-
// Sent as max_tokens per LLM API request. When hit, the model stops generating and returns
606-
// a truncated response.
605+
// When hit, the model stops generating and returns a truncated response.
607606
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
608607
// MaxPromptTokens is the maximum number of tokens allowed in the prompt for a single LLM API
609608
// request. Used by the runtime to trigger conversation compaction before sending a request

nodejs/src/types.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1263,7 +1263,6 @@ export interface ProviderConfig {
12631263

12641264
/**
12651265
* Maximum number of tokens the model can generate in a single response.
1266-
* Sent as {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens max_tokens} per LLM API request.
12671266
* When hit, the model stops generating and returns a truncated response.
12681267
*/
12691268
maxOutputTokens?: number;

python/copilot/session.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -508,8 +508,7 @@ class ProviderConfig(TypedDict, total=False):
508508
bearer_token: str
509509
azure: AzureProviderOptions # Azure-specific options
510510
# Maximum number of tokens the model can generate in a single response.
511-
# Sent as max_tokens per LLM API request. When hit, the model stops
512-
# generating and returns a truncated response.
511+
# When hit, the model stops generating and returns a truncated response.
513512
max_output_tokens: int
514513
# Maximum number of tokens allowed in the prompt for a single LLM API request.
515514
# Used by the runtime to trigger conversation compaction before sending a

0 commit comments

Comments (0)