Skip to content

Commit 4b855e6

Browse files
BenjaminKazemi authored and copybara-github committed
fix: Add missing fields to the model types
PiperOrigin-RevId: 829067545
1 parent f5eae63 commit 4b855e6

File tree

3 files changed

+77
-4
lines changed

3 files changed

+77
-4
lines changed

google/genai/models.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2759,6 +2759,21 @@ def _Model_from_mldev(
27592759
getv(from_object, ['supportedGenerationMethods']),
27602760
)
27612761

2762+
if getv(from_object, ['temperature']) is not None:
2763+
setv(to_object, ['temperature'], getv(from_object, ['temperature']))
2764+
2765+
if getv(from_object, ['maxTemperature']) is not None:
2766+
setv(to_object, ['max_temperature'], getv(from_object, ['maxTemperature']))
2767+
2768+
if getv(from_object, ['topP']) is not None:
2769+
setv(to_object, ['top_p'], getv(from_object, ['topP']))
2770+
2771+
if getv(from_object, ['topK']) is not None:
2772+
setv(to_object, ['top_k'], getv(from_object, ['topK']))
2773+
2774+
if getv(from_object, ['thinking']) is not None:
2775+
setv(to_object, ['thinking'], getv(from_object, ['thinking']))
2776+
27622777
return to_object
27632778

27642779

google/genai/tests/models/test_get.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -52,20 +52,19 @@
5252
pytest_helper.TestTableItem(
5353
name='test_get_mldev_base_model_with_http_options_in_method',
5454
parameters=types._GetModelParameters(
55-
model='gemini-1.5-flash',
55+
model='gemini-2.5-flash',
5656
config={
5757
'http_options': test_http_options,
5858
},
5959
),
60-
exception_if_vertex='404',
6160
),
6261
pytest_helper.TestTableItem(
6362
name='test_get_base_model',
64-
parameters=types._GetModelParameters(model='gemini-1.5-flash'),
63+
parameters=types._GetModelParameters(model='gemini-2.5-flash'),
6564
),
6665
pytest_helper.TestTableItem(
6766
name='test_get_base_model_with_models_prefix',
68-
parameters=types._GetModelParameters(model='models/gemini-1.5-flash'),
67+
parameters=types._GetModelParameters(model='models/gemini-2.5-flash'),
6968
exception_if_vertex='400',
7069
),
7170
]

google/genai/types.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8479,6 +8479,38 @@ class Model(_common.BaseModel):
84798479
checkpoints: Optional[list[Checkpoint]] = Field(
84808480
default=None, description="""The checkpoints of the model."""
84818481
)
8482+
temperature: Optional[float] = Field(
8483+
default=None,
8484+
description="""Temperature value used for sampling set when the dataset was saved.
8485+
This value is used to tune the degree of randomness.""",
8486+
)
8487+
max_temperature: Optional[float] = Field(
8488+
default=None,
8489+
description="""The maximum temperature value used for sampling set when the
8490+
dataset was saved. This value is used to tune the degree of randomness.""",
8491+
)
8492+
top_p: Optional[float] = Field(
8493+
default=None,
8494+
description="""Optional. Specifies the nucleus sampling threshold. The model
8495+
considers only the smallest set of tokens whose cumulative probability is
8496+
at least `top_p`. This helps generate more diverse and less repetitive
8497+
responses. For example, a `top_p` of 0.9 means the model considers tokens
8498+
until the cumulative probability of the tokens to select from reaches 0.9.
8499+
It's recommended to adjust either temperature or `top_p`, but not both.""",
8500+
)
8501+
top_k: Optional[int] = Field(
8502+
default=None,
8503+
description="""Optional. Specifies the top-k sampling threshold. The model
8504+
considers only the top k most probable tokens for the next token. This can
8505+
be useful for generating more coherent and less random text. For example,
8506+
a `top_k` of 40 means the model will choose the next word from the 40 most
8507+
likely words.""",
8508+
)
8509+
thinking: Optional[bool] = Field(
8510+
default=None,
8511+
description="""Whether the model supports thinking features. If true, thoughts are
8512+
returned only if the model supports thought and thoughts are available.""",
8513+
)
84828514

84838515

84848516
class ModelDict(TypedDict, total=False):
@@ -8525,6 +8557,33 @@ class ModelDict(TypedDict, total=False):
85258557
checkpoints: Optional[list[CheckpointDict]]
85268558
"""The checkpoints of the model."""
85278559

8560+
temperature: Optional[float]
8561+
"""Temperature value used for sampling set when the dataset was saved.
8562+
This value is used to tune the degree of randomness."""
8563+
8564+
max_temperature: Optional[float]
8565+
"""The maximum temperature value used for sampling set when the
8566+
dataset was saved. This value is used to tune the degree of randomness."""
8567+
8568+
top_p: Optional[float]
8569+
"""Optional. Specifies the nucleus sampling threshold. The model
8570+
considers only the smallest set of tokens whose cumulative probability is
8571+
at least `top_p`. This helps generate more diverse and less repetitive
8572+
responses. For example, a `top_p` of 0.9 means the model considers tokens
8573+
until the cumulative probability of the tokens to select from reaches 0.9.
8574+
It's recommended to adjust either temperature or `top_p`, but not both."""
8575+
8576+
top_k: Optional[int]
8577+
"""Optional. Specifies the top-k sampling threshold. The model
8578+
considers only the top k most probable tokens for the next token. This can
8579+
be useful for generating more coherent and less random text. For example,
8580+
a `top_k` of 40 means the model will choose the next word from the 40 most
8581+
likely words."""
8582+
8583+
thinking: Optional[bool]
8584+
"""Whether the model supports thinking features. If true, thoughts are
8585+
returned only if the model supports thought and thoughts are available."""
8586+
85288587

85298588
ModelOrDict = Union[Model, ModelDict]
85308589

0 commit comments

Comments (0)