@@ -33,25 +33,38 @@ public sealed class CreateMessageRequestParams : RequestParams
/// Gets or sets the maximum number of tokens to generate in the LLM response, as requested by the server.
/// </summary>
/// <remarks>
/// <para>
/// A token is generally a word or part of a word in the text. Setting this value helps control
/// response length and computation time. The client can choose to sample fewer tokens than requested.
/// </para>
/// <para>
/// The client must respect the <see cref="MaxTokens"/> parameter.
/// </para>
/// </remarks>
[JsonPropertyName("maxTokens")]
public required int MaxTokens { get; set; }
4146
/// <summary>
/// Gets or sets the messages requested by the server to be included in the prompt.
/// </summary>
/// <remarks>
/// The list of messages in a sampling request should not be retained between separate requests.
/// </remarks>
[JsonPropertyName("messages")]
public IList<SamplingMessage> Messages { get; set; } = [];
4755
/// <summary>
/// Gets or sets optional metadata to pass through to the LLM provider.
/// </summary>
/// <remarks>
/// <para>
/// The format of this metadata is provider-specific and can include model-specific settings or
/// configuration that isn't covered by standard parameters. This allows for passing custom parameters
/// that are specific to certain AI models or providers.
/// </para>
/// <para>
/// The client may modify or ignore metadata.
/// </para>
/// </remarks>
[JsonPropertyName("metadata")]
public JsonElement? Metadata { get; set; }
@@ -90,6 +103,9 @@ public sealed class CreateMessageRequestParams : RequestParams
/// sequence exactly matches one of the provided sequences. Common uses include ending markers like "END", punctuation
/// like ".", or special delimiter sequences like "###".
/// </para>
/// <para>
/// The client may modify or ignore stop sequences.
/// </para>
/// </remarks>
[JsonPropertyName("stopSequences")]
public IList<string>? StopSequences { get; set; }
@@ -106,18 +122,34 @@ public sealed class CreateMessageRequestParams : RequestParams
/// <summary>
/// Gets or sets the temperature to use for sampling, as requested by the server.
/// </summary>
/// <remarks>
/// <para>
/// Temperature controls randomness in model responses. Higher values produce more random output,
/// and lower values produce more deterministic output. The valid range depends on the model provider.
/// </para>
/// <para>
/// The client may modify or ignore this value.
/// </para>
/// </remarks>
[JsonPropertyName("temperature")]
public float? Temperature { get; set; }
111136
/// <summary>
/// Gets or sets tools that the model can use during generation.
/// </summary>
/// <remarks>
/// The tool definitions in this list are scoped to this sampling request.
/// They do not need to correspond to tools registered on the server via <see cref="RequestMethods.ToolsList"/>.
/// </remarks>
[JsonPropertyName("tools")]
public IList<Tool>? Tools { get; set; }
117146
/// <summary>
/// Gets or sets controls for how the model uses tools.
/// </summary>
/// <remarks>
/// This controls whether and how the model uses the request-scoped <see cref="Tools"/> during sampling.
/// </remarks>
[JsonPropertyName("toolChoice")]
public ToolChoice? ToolChoice { get; set; }
123155
0 commit comments