<html><body>
<style>
body, h1, h2, h3, div, span, p, pre, a {
margin: 0;
padding: 0;
border: 0;
font-weight: inherit;
font-style: inherit;
font-size: 100%;
font-family: inherit;
vertical-align: baseline;
}
body {
font-size: 13px;
padding: 1em;
}
h1 {
font-size: 26px;
margin-bottom: 1em;
}
h2 {
font-size: 24px;
margin-bottom: 1em;
}
h3 {
font-size: 20px;
margin-bottom: 1em;
margin-top: 1em;
}
pre, code {
line-height: 1.5;
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}
pre {
margin-top: 0.5em;
}
h1, h2, h3, p {
font-family: Arial, sans-serif;
}
h1, h2, h3 {
border-bottom: solid #CCC 1px;
}
.toc_element {
margin-top: 0.5em;
}
.firstline {
margin-left: 2em;
}
.method {
margin-top: 1em;
border: solid 1px #CCC;
padding: 1em;
background: #EEE;
}
.details {
font-weight: bold;
font-size: 14px;
}
</style>
<h1><a href="aiplatform_v1.html">Vertex AI API</a> . <a href="aiplatform_v1.endpoints.html">endpoints</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
<code><a href="aiplatform_v1.endpoints.chat.html">chat()</a></code>
</p>
<p class="firstline">Returns the chat Resource.</p>
<p class="toc_element">
<code><a href="aiplatform_v1.endpoints.operations.html">operations()</a></code>
</p>
<p class="firstline">Returns the operations Resource.</p>
<p class="toc_element">
<code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
<code><a href="#computeTokens">computeTokens(endpoint, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Return a list of tokens based on the input text.</p>
<p class="toc_element">
<code><a href="#countTokens">countTokens(endpoint, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Perform a token counting.</p>
<p class="toc_element">
<code><a href="#fetchPredictOperation">fetchPredictOperation(endpoint, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Fetch an asynchronous online prediction operation.</p>
<p class="toc_element">
<code><a href="#generateContent">generateContent(model, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Generate content with multimodal inputs.</p>
<p class="toc_element">
<code><a href="#predict">predict(endpoint, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Perform an online prediction.</p>
<p class="toc_element">
<code><a href="#predictLongRunning">predictLongRunning(endpoint, body=None, x__xgafv=None)</a></code></p>
<p class="firstline"></p>
<p class="toc_element">
<code><a href="#streamGenerateContent">streamGenerateContent(model, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Generate content with multimodal inputs with streaming support.</p>
<h3>Method Details</h3>
<div class="method">
<code class="details" id="close">close()</code>
<pre>Close httplib2 connections.</pre>
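<p>A minimal usage sketch, assuming application-default credentials: call <code>close()</code> once you are done issuing requests so the underlying httplib2 connections are released.</p>
<pre>from googleapiclient.discovery import build

service = build("aiplatform", "v1")
try:
    pass  # ... call service.endpoints() methods here ...
finally:
    service.close()  # release the underlying httplib2 connections
</pre>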
</div>
<div class="method">
<code class="details" id="computeTokens">computeTokens(endpoint, body=None, x__xgafv=None)</code>
<pre>Return a list of tokens based on the input text.
Args:
endpoint: string, Required. The name of the Endpoint requested to get lists of tokens and token ids. (required)
body: object, The request body.
The object takes the form of:
{ # Request message for ComputeTokens RPC call.
"contents": [ # Optional. Input content.
{ # The structured data content of a message. A Content message contains a `role` field, which indicates the producer of the content, and a `parts` field, which contains the multi-part data of the message.
"parts": [ # Required. A list of Part objects that make up a single message. Parts of a message can have different MIME types. A Content message must have at least one Part.
{ # A datatype containing media that is part of a multi-part Content message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. For media types that are not text, `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
"codeExecutionResult": { # Result of executing the ExecutableCode. Generated only when the `CodeExecution` tool is used. # Optional. The result of executing the ExecutableCode.
"outcome": "A String", # Required. Outcome of the code execution.
"output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
},
"executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the `CodeExecution` tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated. # Optional. Code generated by the model that is intended to be executed.
"code": "A String", # Required. The code to be executed.
"language": "A String", # Required. Programming language of the `code`.
},
"fileData": { # URI-based data. A FileData message contains a URI pointing to data of a specific media type. It is used to represent images, audio, and video stored in Google Cloud Storage. # Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage.
"displayName": "A String", # Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"fileUri": "A String", # Required. The URI of the file in Google Cloud Storage.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"functionCall": { # A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name and a structured JSON object containing the parameters and their values. # Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function.
"args": { # Optional. The function parameters and values in JSON object format. See FunctionDeclaration.parameters for parameter details.
"a_key": "", # Properties of the object.
},
"name": "A String", # Optional. The name of the function to call. Matches FunctionDeclaration.name.
"partialArgs": [ # Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally.
{ # Partial argument value of the function call.
"boolValue": True or False, # Optional. Represents a boolean value.
"jsonPath": "A String", # Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".
"nullValue": "A String", # Optional. Represents a null value.
"numberValue": 3.14, # Optional. Represents a double value.
"stringValue": "A String", # Optional. Represents a string value.
"willContinue": True or False, # Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.
},
],
"willContinue": True or False, # Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow.
},
"functionResponse": { # The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a `FunctionCall` made based on model prediction. # Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted.
"name": "A String", # Required. The name of the function to call. Matches FunctionDeclaration.name and FunctionCall.name.
"parts": [ # Optional. Ordered `Parts` that constitute a function response. Parts may have different IANA MIME types.
{ # A datatype containing media that is part of a `FunctionResponse` message. A `FunctionResponsePart` consists of data which has an associated datatype. A `FunctionResponsePart` can only contain one of the accepted types in `FunctionResponsePart.data`. A `FunctionResponsePart` must have a fixed IANA MIME type identifying the type and subtype of the media if the `inline_data` field is filled with raw bytes.
"fileData": { # URI based data for function response. # URI based data.
"displayName": "A String", # Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"fileUri": "A String", # Required. URI.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"inlineData": { # Raw media bytes for function response. Text should not be sent as raw bytes, use the 'text' field. # Inline media bytes.
"data": "A String", # Required. Raw bytes.
"displayName": "A String", # Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
},
],
"response": { # Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.
"a_key": "", # Properties of the object.
},
"scheduling": "A String", # Optional. Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE.
},
"inlineData": { # A content blob. A Blob contains data of a specific media type. It is used to represent images, audio, and video. # Optional. The inline data content of the part. This can be used to include images, audio, or video in a request.
"data": "A String", # Required. The raw bytes of the data.
"displayName": "A String", # Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"mediaResolution": { # per part media resolution. Media resolution for the input media. # per part media resolution. Media resolution for the input media.
"level": "A String", # The tokenization quality used for given media.
},
"text": "A String", # Optional. The text content of the part. When sent from the VSCode Gemini Code Assist extension, references to @mentioned items will be converted to markdown boldface text. For example `@my-repo` will be converted to and sent as `**my-repo**` by the IDE agent.
"thought": True or False, # Optional. Indicates whether the `part` represents the model's thought process or reasoning.
"thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
"videoMetadata": { # Provides metadata for a video, including the start and end offsets for clipping and the frame rate. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
"endOffset": "A String", # Optional. The end offset of the video.
"fps": 3.14, # Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0].
"startOffset": "A String", # Optional. The start offset of the video.
},
},
],
"role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'.
},
],
"instances": [ # Optional. The instances that are the input to token computing API call. Schema is identical to the prediction schema of the text model, even for the non-text models, like chat models, or Codey models.
"",
],
"model": "A String", # Optional. The name of the publisher model requested to serve the prediction. Format: projects/{project}/locations/{location}/publishers/*/models/*
}
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
2 - v2 error format
Returns:
An object of the form:
{ # Response message for ComputeTokens RPC call.
"tokensInfo": [ # Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances with a prompt in each instance. We also need to return lists of tokens info for the request with multiple instances.
{ # Tokens info with a list of tokens and the corresponding list of token ids.
"role": "A String", # Optional. Optional fields for the role from the corresponding Content.
"tokenIds": [ # A list of token ids from the input.
"A String",
],
"tokens": [ # A list of tokens from the input.
"A String",
],
},
],
}</pre>
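<p>As an illustration only (the endpoint resource name and prompt text below are placeholders, and a regional client as sketched above is assumed), a computeTokens call through this library follows the usual request/execute pattern and returns the <code>tokensInfo</code> lists described above.</p>
<pre>from googleapiclient.discovery import build

service = build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

# Placeholder resource name; substitute your own project, location, and model.
endpoint_name = (
    "projects/my-project/locations/us-central1/"
    "publishers/google/models/gemini-2.0-flash"
)
body = {
    "contents": [
        {"role": "user", "parts": [{"text": "Hello, world!"}]},
    ],
}

response = service.endpoints().computeTokens(endpoint=endpoint_name, body=body).execute()
for info in response.get("tokensInfo", []):
    print(info.get("tokens"), info.get("tokenIds"))
</pre>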
</div>
<div class="method">
<code class="details" id="countTokens">countTokens(endpoint, body=None, x__xgafv=None)</code>
<pre>Perform token counting.
Args:
endpoint: string, Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` (required)
body: object, The request body.
The object takes the form of:
{ # Request message for PredictionService.CountTokens.
"contents": [ # Optional. Input content.
{ # The structured data content of a message. A Content message contains a `role` field, which indicates the producer of the content, and a `parts` field, which contains the multi-part data of the message.
"parts": [ # Required. A list of Part objects that make up a single message. Parts of a message can have different MIME types. A Content message must have at least one Part.
{ # A datatype containing media that is part of a multi-part Content message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. For media types that are not text, `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
"codeExecutionResult": { # Result of executing the ExecutableCode. Generated only when the `CodeExecution` tool is used. # Optional. The result of executing the ExecutableCode.
"outcome": "A String", # Required. Outcome of the code execution.
"output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
},
"executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the `CodeExecution` tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated. # Optional. Code generated by the model that is intended to be executed.
"code": "A String", # Required. The code to be executed.
"language": "A String", # Required. Programming language of the `code`.
},
"fileData": { # URI-based data. A FileData message contains a URI pointing to data of a specific media type. It is used to represent images, audio, and video stored in Google Cloud Storage. # Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage.
"displayName": "A String", # Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"fileUri": "A String", # Required. The URI of the file in Google Cloud Storage.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"functionCall": { # A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name and a structured JSON object containing the parameters and their values. # Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function.
"args": { # Optional. The function parameters and values in JSON object format. See FunctionDeclaration.parameters for parameter details.
"a_key": "", # Properties of the object.
},
"name": "A String", # Optional. The name of the function to call. Matches FunctionDeclaration.name.
"partialArgs": [ # Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally.
{ # Partial argument value of the function call.
"boolValue": True or False, # Optional. Represents a boolean value.
"jsonPath": "A String", # Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".
"nullValue": "A String", # Optional. Represents a null value.
"numberValue": 3.14, # Optional. Represents a double value.
"stringValue": "A String", # Optional. Represents a string value.
"willContinue": True or False, # Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.
},
],
"willContinue": True or False, # Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow.
},
"functionResponse": { # The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a `FunctionCall` made based on model prediction. # Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted.
"name": "A String", # Required. The name of the function to call. Matches FunctionDeclaration.name and FunctionCall.name.
"parts": [ # Optional. Ordered `Parts` that constitute a function response. Parts may have different IANA MIME types.
{ # A datatype containing media that is part of a `FunctionResponse` message. A `FunctionResponsePart` consists of data which has an associated datatype. A `FunctionResponsePart` can only contain one of the accepted types in `FunctionResponsePart.data`. A `FunctionResponsePart` must have a fixed IANA MIME type identifying the type and subtype of the media if the `inline_data` field is filled with raw bytes.
"fileData": { # URI based data for function response. # URI based data.
"displayName": "A String", # Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"fileUri": "A String", # Required. URI.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"inlineData": { # Raw media bytes for function response. Text should not be sent as raw bytes, use the 'text' field. # Inline media bytes.
"data": "A String", # Required. Raw bytes.
"displayName": "A String", # Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
},
],
"response": { # Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.
"a_key": "", # Properties of the object.
},
"scheduling": "A String", # Optional. Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE.
},
"inlineData": { # A content blob. A Blob contains data of a specific media type. It is used to represent images, audio, and video. # Optional. The inline data content of the part. This can be used to include images, audio, or video in a request.
"data": "A String", # Required. The raw bytes of the data.
"displayName": "A String", # Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"mediaResolution": { # per part media resolution. Media resolution for the input media. # per part media resolution. Media resolution for the input media.
"level": "A String", # The tokenization quality used for given media.
},
"text": "A String", # Optional. The text content of the part. When sent from the VSCode Gemini Code Assist extension, references to @mentioned items will be converted to markdown boldface text. For example `@my-repo` will be converted to and sent as `**my-repo**` by the IDE agent.
"thought": True or False, # Optional. Indicates whether the `part` represents the model's thought process or reasoning.
"thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
"videoMetadata": { # Provides metadata for a video, including the start and end offsets for clipping and the frame rate. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
"endOffset": "A String", # Optional. The end offset of the video.
"fps": 3.14, # Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0].
"startOffset": "A String", # Optional. The start offset of the video.
},
},
],
"role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'.
},
],
"generationConfig": { # Configuration for content generation. This message contains all the parameters that control how the model generates content. It allows you to influence the randomness, length, and structure of the output. # Optional. Generation config that the model will use to generate the response.
"audioTimestamp": True or False, # Optional. If enabled, audio timestamps will be included in the request to the model. This can be useful for synchronizing audio with other modalities in the response.
"candidateCount": 42, # Optional. The number of candidate responses to generate. A higher `candidate_count` can provide more options to choose from, but it also consumes more resources. This can be useful for generating a variety of responses and selecting the best one.
"enableAffectiveDialog": True or False, # Optional. If enabled, the model will detect emotions and adapt its responses accordingly. For example, if the model detects that the user is frustrated, it may provide a more empathetic response.
"frequencyPenalty": 3.14, # Optional. Penalizes tokens based on their frequency in the generated text. A positive value helps to reduce the repetition of words and phrases. Valid values can range from [-2.0, 2.0].
"imageConfig": { # Configuration for image generation. This message allows you to control various aspects of image generation, such as the output format, aspect ratio, and whether the model can generate images of people. # Optional. Config for image generation features.
"aspectRatio": "A String", # Optional. The desired aspect ratio for the generated images. The following aspect ratios are supported: "1:1" "2:3", "3:2" "3:4", "4:3" "4:5", "5:4" "9:16", "16:9" "21:9"
"imageOutputOptions": { # The image output format for generated images. # Optional. The image output format for generated images.
"compressionQuality": 42, # Optional. The compression quality of the output image.
"mimeType": "A String", # Optional. The image format that the output should be saved as.
},
"imageSize": "A String", # Optional. Specifies the size of generated images. Supported values are `1K`, `2K`, `4K`. If not specified, the model will use default value `1K`.
"personGeneration": "A String", # Optional. Controls whether the model can generate people.
"prominentPeople": "A String", # Optional. Controls whether prominent people (celebrities) generation is allowed. If used with personGeneration, personGeneration enum would take precedence. For instance, if ALLOW_NONE is set, all person generation would be blocked. If this field is unspecified, the default behavior is to allow prominent people.
},
"logprobs": 42, # Optional. The number of top log probabilities to return for each token. This can be used to see which other tokens were considered likely candidates for a given position. A higher value will return more options, but it will also increase the size of the response.
"maxOutputTokens": 42, # Optional. The maximum number of tokens to generate in the response. A token is approximately four characters. The default value varies by model. This parameter can be used to control the length of the generated text and prevent overly long responses.
"mediaResolution": "A String", # Optional. The token resolution at which input media content is sampled. This is used to control the trade-off between the quality of the response and the number of tokens used to represent the media. A higher resolution allows the model to perceive more detail, which can lead to a more nuanced response, but it will also use more tokens. This does not affect the image dimensions sent to the model.
"presencePenalty": 3.14, # Optional. Penalizes tokens that have already appeared in the generated text. A positive value encourages the model to generate more diverse and less repetitive text. Valid values can range from [-2.0, 2.0].
"responseJsonSchema": "", # Optional. When this field is set, response_schema must be omitted and response_mime_type must be set to `application/json`.
"responseLogprobs": True or False, # Optional. If set to true, the log probabilities of the output tokens are returned. Log probabilities are the logarithm of the probability of a token appearing in the output. A higher log probability means the token is more likely to be generated. This can be useful for analyzing the model's confidence in its own output and for debugging.
"responseMimeType": "A String", # Optional. The IANA standard MIME type of the response. The model will generate output that conforms to this MIME type. Supported values include 'text/plain' (default) and 'application/json'. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined.
"responseModalities": [ # Optional. The modalities of the response. The model will generate a response that includes all the specified modalities. For example, if this is set to `[TEXT, IMAGE]`, the response will include both text and an image.
"A String",
],
"responseSchema": { # Defines the schema of input and output data. This is a subset of the [OpenAPI 3.0 Schema Object](https://spec.openapis.org/oas/v3.0.3#schema-object). # Optional. Lets you to specify a schema for the model's response, ensuring that the output conforms to a particular structure. This is useful for generating structured data such as JSON. The schema is a subset of the [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema) object. When this field is set, you must also set the `response_mime_type` to `application/json`.
"additionalProperties": "", # Optional. If `type` is `OBJECT`, specifies how to handle properties not defined in `properties`. If it is a boolean `false`, no additional properties are allowed. If it is a schema, additional properties are allowed if they conform to the schema.
"anyOf": [ # Optional. The instance must be valid against any (one or more) of the subschemas listed in `any_of`.
# Object with schema name: GoogleCloudAiplatformV1Schema
],
"default": "", # Optional. Default value to use if the field is not specified.
"defs": { # Optional. `defs` provides a map of schema definitions that can be reused by `ref` elsewhere in the schema. Only allowed at root level of the schema.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"description": "A String", # Optional. Describes the data. The model uses this field to understand the purpose of the schema and how to use it. It is a best practice to provide a clear and descriptive explanation for the schema and its properties here, rather than in the prompt.
"enum": [ # Optional. Possible values of the field. This field can be used to restrict a value to a fixed set of values. To mark a field as an enum, set `format` to `enum` and provide the list of possible values in `enum`. For example: 1. To define directions: `{type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]}` 2. To define apartment numbers: `{type:INTEGER, format:enum, enum:["101", "201", "301"]}`
"A String",
],
"example": "", # Optional. Example of an instance of this schema.
"format": "A String", # Optional. The format of the data. For `NUMBER` type, format can be `float` or `double`. For `INTEGER` type, format can be `int32` or `int64`. For `STRING` type, format can be `email`, `byte`, `date`, `date-time`, `password`, and other formats to further refine the data type.
"items": # Object with schema name: GoogleCloudAiplatformV1Schema # Optional. If type is `ARRAY`, `items` specifies the schema of elements in the array.
"maxItems": "A String", # Optional. If type is `ARRAY`, `max_items` specifies the maximum number of items in an array.
"maxLength": "A String", # Optional. If type is `STRING`, `max_length` specifies the maximum length of the string.
"maxProperties": "A String", # Optional. If type is `OBJECT`, `max_properties` specifies the maximum number of properties that can be provided.
"maximum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `maximum` specifies the maximum allowed value.
"minItems": "A String", # Optional. If type is `ARRAY`, `min_items` specifies the minimum number of items in an array.
"minLength": "A String", # Optional. If type is `STRING`, `min_length` specifies the minimum length of the string.
"minProperties": "A String", # Optional. If type is `OBJECT`, `min_properties` specifies the minimum number of properties that can be provided.
"minimum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `minimum` specifies the minimum allowed value.
"nullable": True or False, # Optional. Indicates if the value of this field can be null.
"pattern": "A String", # Optional. If type is `STRING`, `pattern` specifies a regular expression that the string must match.
"properties": { # Optional. If type is `OBJECT`, `properties` is a map of property names to schema definitions for each property of the object.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"propertyOrdering": [ # Optional. Order of properties displayed or used where order matters. This is not a standard field in OpenAPI specification, but can be used to control the order of properties.
"A String",
],
"ref": "A String", # Optional. Allows referencing another schema definition to use in place of this schema. The value must be a valid reference to a schema in `defs`. For example, the following schema defines a reference to a schema node named "Pet": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the "pet" property is a reference to the schema node named "Pet". See details in https://json-schema.org/understanding-json-schema/structuring
"required": [ # Optional. If type is `OBJECT`, `required` lists the names of properties that must be present.
"A String",
],
"title": "A String", # Optional. Title for the schema.
"type": "A String", # Optional. Data type of the schema field.
},
"routingConfig": { # The configuration for routing the request to a specific model. This can be used to control which model is used for the generation, either automatically or by specifying a model name. # Optional. Routing configuration.
"autoMode": { # The configuration for automated routing. When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference. # In this mode, the model is selected automatically based on the content of the request.
"modelRoutingPreference": "A String", # The model routing preference.
},
"manualMode": { # The configuration for manual routing. When manual routing is specified, the model will be selected based on the model name provided. # In this mode, the model is specified manually.
"modelName": "A String", # The name of the model to use. Only public LLM models are accepted.
},
},
"seed": 42, # Optional. A seed for the random number generator. By setting a seed, you can make the model's output mostly deterministic. For a given prompt and parameters (like temperature, top_p, etc.), the model will produce the same response every time. However, it's not a guaranteed absolute deterministic behavior. This is different from parameters like `temperature`, which control the *level* of randomness. `seed` ensures that the "random" choices the model makes are the same on every run, making it essential for testing and ensuring reproducible results.
"speechConfig": { # Configuration for speech generation. # Optional. The speech generation config.
"languageCode": "A String", # Optional. The language code (ISO 639-1) for the speech synthesis.
"multiSpeakerVoiceConfig": { # Configuration for a multi-speaker text-to-speech request. # The configuration for a multi-speaker text-to-speech request. This field is mutually exclusive with `voice_config`.
"speakerVoiceConfigs": [ # Required. A list of configurations for the voices of the speakers. Exactly two speaker voice configurations must be provided.
{ # Configuration for a single speaker in a multi-speaker setup.
"speaker": "A String", # Required. The name of the speaker. This should be the same as the speaker name used in the prompt.
"voiceConfig": { # Configuration for a voice. # Required. The configuration for the voice of this speaker.
"prebuiltVoiceConfig": { # Configuration for a prebuilt voice. # The configuration for a prebuilt voice.
"voiceName": "A String", # The name of the prebuilt voice to use.
},
"replicatedVoiceConfig": { # The configuration for the replicated voice to use. # Optional. The configuration for a replicated voice. This enables users to replicate a voice from an audio sample.
"mimeType": "A String", # Optional. The mimetype of the voice sample. The only currently supported value is `audio/wav`. This represents 16-bit signed little-endian wav data, with a 24kHz sampling rate. `mime_type` will default to `audio/wav` if not set.
"voiceSampleAudio": "A String", # Optional. The sample of the custom voice.
},
},
},
],
},
"voiceConfig": { # Configuration for a voice. # The configuration for the voice to use.
"prebuiltVoiceConfig": { # Configuration for a prebuilt voice. # The configuration for a prebuilt voice.
"voiceName": "A String", # The name of the prebuilt voice to use.
},
"replicatedVoiceConfig": { # The configuration for the replicated voice to use. # Optional. The configuration for a replicated voice. This enables users to replicate a voice from an audio sample.
"mimeType": "A String", # Optional. The mimetype of the voice sample. The only currently supported value is `audio/wav`. This represents 16-bit signed little-endian wav data, with a 24kHz sampling rate. `mime_type` will default to `audio/wav` if not set.
"voiceSampleAudio": "A String", # Optional. The sample of the custom voice.
},
},
},
"stopSequences": [ # Optional. A list of character sequences that will stop the model from generating further tokens. If a stop sequence is generated, the output will end at that point. This is useful for controlling the length and structure of the output. For example, you can use ["\n", "###"] to stop generation at a new line or a specific marker.
"A String",
],
"temperature": 3.14, # Optional. Controls the randomness of the output. A higher temperature results in more creative and diverse responses, while a lower temperature makes the output more predictable and focused. The valid range is (0.0, 2.0].
"thinkingConfig": { # Configuration for the model's thinking features. "Thinking" is a process where the model breaks down a complex task into smaller, manageable steps. This allows the model to reason about the task, plan its approach, and execute the plan to generate a high-quality response. # Optional. Configuration for thinking features. An error will be returned if this field is set for models that don't support thinking.
"includeThoughts": True or False, # Optional. If true, the model will include its thoughts in the response. "Thoughts" are the intermediate steps the model takes to arrive at the final response. They can provide insights into the model's reasoning process and help with debugging. If this is true, thoughts are returned only when available.
"thinkingBudget": 42, # Optional. The token budget for the model's thinking process. The model will make a best effort to stay within this budget. This can be used to control the trade-off between response quality and latency.
"thinkingLevel": "A String", # Optional. The number of thoughts tokens that the model should generate.
},
"topK": 3.14, # Optional. Specifies the top-k sampling threshold. The model considers only the top k most probable tokens for the next token. This can be useful for generating more coherent and less random text. For example, a `top_k` of 40 means the model will choose the next word from the 40 most likely words.
"topP": 3.14, # Optional. Specifies the nucleus sampling threshold. The model considers only the smallest set of tokens whose cumulative probability is at least `top_p`. This helps generate more diverse and less repetitive responses. For example, a `top_p` of 0.9 means the model considers tokens until the cumulative probability of the tokens to select from reaches 0.9. It's recommended to adjust either temperature or `top_p`, but not both.
},
"instances": [ # Optional. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model.
"",
],
"model": "A String", # Optional. The name of the publisher model requested to serve the prediction. Format: `projects/{project}/locations/{location}/publishers/*/models/*`
"systemInstruction": { # The structured data content of a message. A Content message contains a `role` field, which indicates the producer of the content, and a `parts` field, which contains the multi-part data of the message. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph.
"parts": [ # Required. A list of Part objects that make up a single message. Parts of a message can have different MIME types. A Content message must have at least one Part.
{ # A datatype containing media that is part of a multi-part Content message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. For media types that are not text, `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
"codeExecutionResult": { # Result of executing the ExecutableCode. Generated only when the `CodeExecution` tool is used. # Optional. The result of executing the ExecutableCode.
"outcome": "A String", # Required. Outcome of the code execution.
"output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
},
"executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the `CodeExecution` tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated. # Optional. Code generated by the model that is intended to be executed.
"code": "A String", # Required. The code to be executed.
"language": "A String", # Required. Programming language of the `code`.
},
"fileData": { # URI-based data. A FileData message contains a URI pointing to data of a specific media type. It is used to represent images, audio, and video stored in Google Cloud Storage. # Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage.
"displayName": "A String", # Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"fileUri": "A String", # Required. The URI of the file in Google Cloud Storage.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"functionCall": { # A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name and a structured JSON object containing the parameters and their values. # Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function.
"args": { # Optional. The function parameters and values in JSON object format. See FunctionDeclaration.parameters for parameter details.
"a_key": "", # Properties of the object.
},
"name": "A String", # Optional. The name of the function to call. Matches FunctionDeclaration.name.
"partialArgs": [ # Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally.
{ # Partial argument value of the function call.
"boolValue": True or False, # Optional. Represents a boolean value.
"jsonPath": "A String", # Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".
"nullValue": "A String", # Optional. Represents a null value.
"numberValue": 3.14, # Optional. Represents a double value.
"stringValue": "A String", # Optional. Represents a string value.
"willContinue": True or False, # Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.
},
],
"willContinue": True or False, # Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow.
},
"functionResponse": { # The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a `FunctionCall` made based on model prediction. # Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted.
"name": "A String", # Required. The name of the function to call. Matches FunctionDeclaration.name and FunctionCall.name.
"parts": [ # Optional. Ordered `Parts` that constitute a function response. Parts may have different IANA MIME types.
{ # A datatype containing media that is part of a `FunctionResponse` message. A `FunctionResponsePart` consists of data which has an associated datatype. A `FunctionResponsePart` can only contain one of the accepted types in `FunctionResponsePart.data`. A `FunctionResponsePart` must have a fixed IANA MIME type identifying the type and subtype of the media if the `inline_data` field is filled with raw bytes.
"fileData": { # URI based data for function response. # URI based data.
"displayName": "A String", # Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"fileUri": "A String", # Required. URI.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"inlineData": { # Raw media bytes for function response. Text should not be sent as raw bytes, use the 'text' field. # Inline media bytes.
"data": "A String", # Required. Raw bytes.
"displayName": "A String", # Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
},
],
"response": { # Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.
"a_key": "", # Properties of the object.
},
"scheduling": "A String", # Optional. Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE.
},
"inlineData": { # A content blob. A Blob contains data of a specific media type. It is used to represent images, audio, and video. # Optional. The inline data content of the part. This can be used to include images, audio, or video in a request.
"data": "A String", # Required. The raw bytes of the data.
"displayName": "A String", # Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"mediaResolution": { # per part media resolution. Media resolution for the input media. # per part media resolution. Media resolution for the input media.
"level": "A String", # The tokenization quality used for given media.
},
"text": "A String", # Optional. The text content of the part. When sent from the VSCode Gemini Code Assist extension, references to @mentioned items will be converted to markdown boldface text. For example `@my-repo` will be converted to and sent as `**my-repo**` by the IDE agent.
"thought": True or False, # Optional. Indicates whether the `part` represents the model's thought process or reasoning.
"thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
"videoMetadata": { # Provides metadata for a video, including the start and end offsets for clipping and the frame rate. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
"endOffset": "A String", # Optional. The end offset of the video.
"fps": 3.14, # Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0].
"startOffset": "A String", # Optional. The start offset of the video.
},
},
],
"role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'.
},
"tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.
{ # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval).
"codeExecution": { # Tool that executes code generated by the model, and automatically returns the result to the model. See also ExecutableCode and CodeExecutionResult, which are input and output to this tool. # Optional. CodeExecution tool type. Enables the model to execute code as part of generation.
},
"computerUse": { # Tool to support computer use. # Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations.
"environment": "A String", # Required. The environment being operated.
"excludedPredefinedFunctions": [ # Optional. By default, [predefined functions](https://cloud.google.com/vertex-ai/generative-ai/docs/computer-use#supported-actions) are included in the final model call. Some of them can be explicitly excluded from being automatically included. This can serve two purposes: 1. Using a more restricted / different action space. 2. Improving the definitions / instructions of predefined functions.
"A String",
],
},
"enterpriseWebSearch": { # Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. # Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance.
"blockingConfidence": "A String", # Optional. Sites with confidence level chosen & above this value will be blocked from the search results.
"excludeDomains": [ # Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.
"A String",
],
},
"functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 512 function declarations can be provided.
{ # Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name, description, parameters and response type. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client.
"description": "A String", # Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function.
"name": "A String", # Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots, colons and dashes, with a maximum length of 128.
"parameters": { # Defines the schema of input and output data. This is a subset of the [OpenAPI 3.0 Schema Object](https://spec.openapis.org/oas/v3.0.3#schema-object). # Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1
"additionalProperties": "", # Optional. If `type` is `OBJECT`, specifies how to handle properties not defined in `properties`. If it is a boolean `false`, no additional properties are allowed. If it is a schema, additional properties are allowed if they conform to the schema.
"anyOf": [ # Optional. The instance must be valid against any (one or more) of the subschemas listed in `any_of`.
# Object with schema name: GoogleCloudAiplatformV1Schema
],
"default": "", # Optional. Default value to use if the field is not specified.
"defs": { # Optional. `defs` provides a map of schema definitions that can be reused by `ref` elsewhere in the schema. Only allowed at root level of the schema.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"description": "A String", # Optional. Describes the data. The model uses this field to understand the purpose of the schema and how to use it. It is a best practice to provide a clear and descriptive explanation for the schema and its properties here, rather than in the prompt.
"enum": [ # Optional. Possible values of the field. This field can be used to restrict a value to a fixed set of values. To mark a field as an enum, set `format` to `enum` and provide the list of possible values in `enum`. For example: 1. To define directions: `{type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]}` 2. To define apartment numbers: `{type:INTEGER, format:enum, enum:["101", "201", "301"]}`
"A String",
],
"example": "", # Optional. Example of an instance of this schema.
"format": "A String", # Optional. The format of the data. For `NUMBER` type, format can be `float` or `double`. For `INTEGER` type, format can be `int32` or `int64`. For `STRING` type, format can be `email`, `byte`, `date`, `date-time`, `password`, and other formats to further refine the data type.
"items": # Object with schema name: GoogleCloudAiplatformV1Schema # Optional. If type is `ARRAY`, `items` specifies the schema of elements in the array.
"maxItems": "A String", # Optional. If type is `ARRAY`, `max_items` specifies the maximum number of items in an array.
"maxLength": "A String", # Optional. If type is `STRING`, `max_length` specifies the maximum length of the string.
"maxProperties": "A String", # Optional. If type is `OBJECT`, `max_properties` specifies the maximum number of properties that can be provided.
"maximum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `maximum` specifies the maximum allowed value.
"minItems": "A String", # Optional. If type is `ARRAY`, `min_items` specifies the minimum number of items in an array.
"minLength": "A String", # Optional. If type is `STRING`, `min_length` specifies the minimum length of the string.
"minProperties": "A String", # Optional. If type is `OBJECT`, `min_properties` specifies the minimum number of properties that can be provided.
"minimum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `minimum` specifies the minimum allowed value.
"nullable": True or False, # Optional. Indicates if the value of this field can be null.
"pattern": "A String", # Optional. If type is `STRING`, `pattern` specifies a regular expression that the string must match.
"properties": { # Optional. If type is `OBJECT`, `properties` is a map of property names to schema definitions for each property of the object.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"propertyOrdering": [ # Optional. Order of properties displayed or used where order matters. This is not a standard field in OpenAPI specification, but can be used to control the order of properties.
"A String",
],
"ref": "A String", # Optional. Allows referencing another schema definition to use in place of this schema. The value must be a valid reference to a schema in `defs`. For example, the following schema defines a reference to a schema node named "Pet": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the "pet" property is a reference to the schema node named "Pet". See details in https://json-schema.org/understanding-json-schema/structuring
"required": [ # Optional. If type is `OBJECT`, `required` lists the names of properties that must be present.
"A String",
],
"title": "A String", # Optional. Title for the schema.
"type": "A String", # Optional. Data type of the schema field.
},
"parametersJsonSchema": "", # Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { "type": "object", "properties": { "name": { "type": "string" }, "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", "age"], "propertyOrdering": ["name", "age"] } ``` This field is mutually exclusive with `parameters`.
"response": { # Defines the schema of input and output data. This is a subset of the [OpenAPI 3.0 Schema Object](https://spec.openapis.org/oas/v3.0.3#schema-object). # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function.
"additionalProperties": "", # Optional. If `type` is `OBJECT`, specifies how to handle properties not defined in `properties`. If it is a boolean `false`, no additional properties are allowed. If it is a schema, additional properties are allowed if they conform to the schema.
"anyOf": [ # Optional. The instance must be valid against any (one or more) of the subschemas listed in `any_of`.
# Object with schema name: GoogleCloudAiplatformV1Schema
],
"default": "", # Optional. Default value to use if the field is not specified.
"defs": { # Optional. `defs` provides a map of schema definitions that can be reused by `ref` elsewhere in the schema. Only allowed at root level of the schema.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"description": "A String", # Optional. Describes the data. The model uses this field to understand the purpose of the schema and how to use it. It is a best practice to provide a clear and descriptive explanation for the schema and its properties here, rather than in the prompt.
"enum": [ # Optional. Possible values of the field. This field can be used to restrict a value to a fixed set of values. To mark a field as an enum, set `format` to `enum` and provide the list of possible values in `enum`. For example: 1. To define directions: `{type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]}` 2. To define apartment numbers: `{type:INTEGER, format:enum, enum:["101", "201", "301"]}`
"A String",
],
"example": "", # Optional. Example of an instance of this schema.
"format": "A String", # Optional. The format of the data. For `NUMBER` type, format can be `float` or `double`. For `INTEGER` type, format can be `int32` or `int64`. For `STRING` type, format can be `email`, `byte`, `date`, `date-time`, `password`, and other formats to further refine the data type.
"items": # Object with schema name: GoogleCloudAiplatformV1Schema # Optional. If type is `ARRAY`, `items` specifies the schema of elements in the array.
"maxItems": "A String", # Optional. If type is `ARRAY`, `max_items` specifies the maximum number of items in an array.
"maxLength": "A String", # Optional. If type is `STRING`, `max_length` specifies the maximum length of the string.
"maxProperties": "A String", # Optional. If type is `OBJECT`, `max_properties` specifies the maximum number of properties that can be provided.
"maximum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `maximum` specifies the maximum allowed value.
"minItems": "A String", # Optional. If type is `ARRAY`, `min_items` specifies the minimum number of items in an array.
"minLength": "A String", # Optional. If type is `STRING`, `min_length` specifies the minimum length of the string.
"minProperties": "A String", # Optional. If type is `OBJECT`, `min_properties` specifies the minimum number of properties that can be provided.
"minimum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `minimum` specifies the minimum allowed value.
"nullable": True or False, # Optional. Indicates if the value of this field can be null.
"pattern": "A String", # Optional. If type is `STRING`, `pattern` specifies a regular expression that the string must match.
"properties": { # Optional. If type is `OBJECT`, `properties` is a map of property names to schema definitions for each property of the object.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"propertyOrdering": [ # Optional. Order of properties displayed or used where order matters. This is not a standard field in OpenAPI specification, but can be used to control the order of properties.
"A String",
],
"ref": "A String", # Optional. Allows referencing another schema definition to use in place of this schema. The value must be a valid reference to a schema in `defs`. For example, the following schema defines a reference to a schema node named "Pet": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the "pet" property is a reference to the schema node named "Pet". See details in https://json-schema.org/understanding-json-schema/structuring
"required": [ # Optional. If type is `OBJECT`, `required` lists the names of properties that must be present.
"A String",
],
"title": "A String", # Optional. Title for the schema.
"type": "A String", # Optional. Data type of the schema field.
},
"responseJsonSchema": "", # Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`.
},
],
"googleMaps": { # Tool to retrieve public maps data for grounding, powered by Google. # Optional. GoogleMaps tool type. Tool to support Google Maps in Model.
"enableWidget": True or False, # Optional. If true, include the widget context token in the response.
},
"googleSearch": { # GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. # Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google.
"blockingConfidence": "A String", # Optional. Sites with confidence level chosen & above this value will be blocked from the search results.
"excludeDomains": [ # Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].
"A String",
],
"searchTypes": { # Different types of search that can be enabled on the GoogleSearch tool. # Optional. The set of search types to enable. If not set, web search is enabled by default.
"imageSearch": { # Image search for grounding and related configurations. # Optional. Setting this field enables image search. Image bytes are returned.
},
"webSearch": { # Standard web search for grounding and related configurations. Only text results are returned. # Optional. Setting this field enables web search. Only text results are returned.
},
},
},
"googleSearchRetrieval": { # Tool to retrieve public web data for grounding, powered by Google. # Optional. Specialized retrieval tool that is powered by Google Search.
"dynamicRetrievalConfig": { # Describes the options to customize dynamic retrieval. # Specifies the dynamic retrieval configuration for the given source.
"dynamicThreshold": 3.14, # Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used.
"mode": "A String", # The mode of the predictor to be used in dynamic retrieval.
},
},
"parallelAiSearch": { # ParallelAiSearch tool type. A tool that uses the Parallel.ai search engine for grounding. # Optional. If specified, Vertex AI will use Parallel.ai to search for information to answer user queries. The search results will be grounded on Parallel.ai and presented to the model for response generation
"apiKey": "A String", # Optional. The API key for ParallelAiSearch. If an API key is not provided, the system will attempt to verify access by checking for an active Parallel.ai subscription through the Google Cloud Marketplace. See https://docs.parallel.ai/search/search-quickstart for more details.
"customConfigs": { # Optional. Custom configs for ParallelAiSearch. This field can be used to pass any parameter from the Parallel.ai Search API. See the Parallel.ai documentation for the full list of available parameters and their usage: https://docs.parallel.ai/api-reference/search-beta/search Currently only `source_policy`, `excerpts`, `max_results`, `mode`, `fetch_policy` can be set via this field. For example: { "source_policy": { "include_domains": ["google.com", "wikipedia.org"], "exclude_domains": ["example.com"] }, "fetch_policy": { "max_age_seconds": 3600 } }
"a_key": "", # Properties of the object.
},
},
"retrieval": { # Defines a retrieval tool that model can call to access external knowledge. # Optional. Retrieval tool type. System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation.
"disableAttribution": True or False, # Optional. Deprecated. This option is no longer supported.
"externalApi": { # Retrieve from data source powered by external API for grounding. The external API is not owned by Google, but need to follow the pre-defined API spec. # Use data source powered by external API for grounding.
"apiAuth": { # The generic reusable api auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead. # The authentication config to access the API. Deprecated. Please use auth_config instead.
"apiKeyConfig": { # The API secret. # The API secret.
"apiKeySecretVersion": "A String", # Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}
"apiKeyString": "A String", # The API key string. Either this or `api_key_secret_version` must be set.
},
},
"apiSpec": "A String", # The API spec that the external API implements.
"authConfig": { # Auth configuration to run the extension. # The authentication config to access the API.
"apiKeyConfig": { # Config for authentication with API key. # Config for API key auth.
"apiKeySecret": "A String", # Optional. The name of the SecretManager secret version resource storing the API key. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If both `api_key_secret` and `api_key_string` are specified, this field takes precedence over `api_key_string`. - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.
"apiKeyString": "A String", # Optional. The API key to be used in the request directly.
"httpElementLocation": "A String", # Optional. The location of the API key.
"name": "A String", # Optional. The parameter name of the API key. E.g. If the API request is "https://example.com/act?api_key=", "api_key" would be the parameter name.
},
"authType": "A String", # Type of auth scheme.
"googleServiceAccountConfig": { # Config for Google Service Account Authentication. # Config for Google Service Account auth.
"serviceAccount": "A String", # Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.
},
"httpBasicAuthConfig": { # Config for HTTP Basic Authentication. # Config for HTTP Basic auth.
"credentialSecret": "A String", # Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.
},
"oauthConfig": { # Config for user oauth. # Config for user oauth.
"accessToken": "A String", # Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.
"serviceAccount": "A String", # The service account used to generate access tokens for executing the Extension. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.
},
"oidcConfig": { # Config for user OIDC auth. # Config for user OIDC auth.
"idToken": "A String", # OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.
"serviceAccount": "A String", # The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).
},
},
"elasticSearchParams": { # The search parameters to use for the ELASTIC_SEARCH spec. # Parameters for the elastic search API.
"index": "A String", # The ElasticSearch index to use.
"numHits": 42, # Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.
"searchTemplate": "A String", # The ElasticSearch search template to use.
},
"endpoint": "A String", # The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search
"simpleSearchParams": { # The search parameters to use for SIMPLE_SEARCH spec. # Parameters for the simple search API.
},
},
"vertexAiSearch": { # Retrieve from Vertex AI Search datastore or engine for grounding. datastore and engine are mutually exclusive. See https://cloud.google.com/products/agent-builder # Set to use data source powered by Vertex AI Search.
"dataStoreSpecs": [ # Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used.
{ # Define data stores within engine to filter on in a search call and configurations for those data stores. For more information, see https://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec
"dataStore": "A String", # Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`
"filter": "A String", # Optional. Filter specification to filter documents in the data store specified by data_store field. For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)
},
],
"datastore": "A String", # Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`
"engine": "A String", # Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}`
"filter": "A String", # Optional. Filter strings to be passed to the search API.
"maxResults": 42, # Optional. Number of search results to return per query. The default value is 10. The maximumm allowed value is 10.
},
"vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService.
"ragResources": [ # Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.
{ # The definition of the Rag resource.
"ragCorpus": "A String", # Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`
"ragFileIds": [ # Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.
"A String",
],
},
],
"ragRetrievalConfig": { # Specifies the context retrieval config. # Optional. The retrieval config for the Rag query.
"filter": { # Config for filters. # Optional. Config for filters.
"metadataFilter": "A String", # Optional. String for metadata filtering.
"vectorDistanceThreshold": 3.14, # Optional. Only returns contexts with vector distance smaller than the threshold.
"vectorSimilarityThreshold": 3.14, # Optional. Only returns contexts with vector similarity larger than the threshold.
},
"ranking": { # Config for ranking and reranking. # Optional. Config for ranking and reranking.
"llmRanker": { # Config for LlmRanker. # Optional. Config for LlmRanker.
"modelName": "A String", # Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
},
"rankService": { # Config for Rank Service. # Optional. Config for Rank Service.
"modelName": "A String", # Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`
},
},
"topK": 42, # Optional. The number of contexts to retrieve.
},
"similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora.
"vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold.
},
},
"urlContext": { # Tool to support URL context. # Optional. Tool to support URL context retrieval.
},
},
],
}
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
2 - v2 error format
Returns:
An object of the form:
{ # Response message for PredictionService.CountTokens.
"promptTokensDetails": [ # Output only. List of modalities that were processed in the request input.
{ # Represents a breakdown of token usage by modality. This message is used in CountTokensResponse and GenerateContentResponse.UsageMetadata to provide a detailed view of how many tokens are used by each modality (e.g., text, image, video) in a request. This is particularly useful for multimodal models, allowing you to track and manage token consumption for billing and quota purposes.
"modality": "A String", # The modality that this token count applies to.
"tokenCount": 42, # The number of tokens counted for this modality.
},
],
"totalBillableCharacters": 42, # The total number of billable characters counted across all instances from the request.
"totalTokens": 42, # The total number of tokens counted across all instances from the request.
}</pre>
</div>
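<p>For illustration only, and not part of the generated reference: a minimal sketch of calling <code>countTokens</code> with a function-declaration tool from the Python client. It assumes the service object is built with <code>googleapiclient.discovery.build("aiplatform", "v1", ...)</code> against a regional API endpoint, that this resource is reached via <code>service.endpoints()</code>, that <code>countTokens</code> takes an <code>endpoint</code> parameter in the same format as <code>fetchPredictOperation</code> below, and that the project, location, model, and function names are placeholders.</p>
<pre>
from googleapiclient import discovery

# Build the Vertex AI client against a regional endpoint (placeholder region).
service = discovery.build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

# Request body mirroring the CountTokens request shape documented above:
# one user turn plus a Tool carrying a single FunctionDeclaration.
body = {
    "contents": [
        {"role": "user", "parts": [{"text": "What is the weather in Paris?"}]},
    ],
    "tools": [
        {
            "functionDeclarations": [
                {
                    "name": "get_weather",  # hypothetical function name
                    "description": "Returns the current weather for a city.",
                    "parameters": {
                        "type": "OBJECT",
                        "properties": {"city": {"type": "STRING"}},
                        "required": ["city"],
                    },
                }
            ]
        }
    ],
}

response = (
    service.endpoints()
    .countTokens(
        endpoint="projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash",
        body=body,
    )
    .execute()
)

# totalTokens and promptTokensDetails follow the response shape documented above.
print(response["totalTokens"], response.get("promptTokensDetails"))
</pre>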
<div class="method">
<code class="details" id="fetchPredictOperation">fetchPredictOperation(endpoint, body=None, x__xgafv=None)</code>
<pre>Fetch an asynchronous online prediction operation.
Args:
endpoint: string, Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` (required)
body: object, The request body.
The object takes the form of:
{ # Request message for PredictionService.FetchPredictOperation.
"operationName": "A String", # Required. The server-assigned name for the operation.
}
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
2 - v2 error format
Returns:
An object of the form:
{ # This resource represents a long-running operation that is the result of a network API call.
"done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
"error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
"code": 42, # The status code, which should be an enum value of google.rpc.Code.
"details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
"message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
},
"metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
"name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
"response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
}</pre>
</div>
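<p>For illustration only, and not part of the generated reference: a minimal sketch of polling <code>fetchPredictOperation</code> until the operation completes, using the request and Operation shapes documented above. It assumes the same <code>discovery.build("aiplatform", "v1", ...)</code> client and <code>service.endpoints()</code> accessor as in the previous sketch, and that the endpoint name and operation name (returned by a prior long-running predict call) are placeholders.</p>
<pre>
import time

from googleapiclient import discovery

# Build the Vertex AI client against a regional endpoint (placeholder region).
service = discovery.build(
    "aiplatform",
    "v1",
    client_options={"api_endpoint": "https://us-central1-aiplatform.googleapis.com"},
)

endpoint = "projects/my-project/locations/us-central1/endpoints/1234567890"  # placeholder
operation_name = "OPERATION_NAME_FROM_A_PRIOR_LONG_RUNNING_PREDICT_CALL"  # placeholder

# Poll until the long-running operation reports done, then inspect error/response
# as described in the Operation message above.
while True:
    op = (
        service.endpoints()
        .fetchPredictOperation(endpoint=endpoint, body={"operationName": operation_name})
        .execute()
    )
    if op.get("done"):
        break
    time.sleep(10)

if "error" in op:
    raise RuntimeError(op["error"].get("message", "operation failed"))
result = op.get("response", {})
print(result)
</pre>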
<div class="method">
<code class="details" id="generateContent">generateContent(model, body=None, x__xgafv=None)</code>
<pre>Generate content with multimodal inputs.
Args:
model: string, Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` (required)
body: object, The request body.
The object takes the form of:
{ # Request message for [PredictionService.GenerateContent].
"cachedContent": "A String", # Optional. The name of the cached content used as context to serve the prediction. Note: only used in explicit caching, where users can have control over caching (e.g. what content to cache) and enjoy guaranteed cost savings. Format: `projects/{project}/locations/{location}/cachedContents/{cachedContent}`
"contents": [ # Required. The content of the current conversation with the model. For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request.
{ # The structured data content of a message. A Content message contains a `role` field, which indicates the producer of the content, and a `parts` field, which contains the multi-part data of the message.
"parts": [ # Required. A list of Part objects that make up a single message. Parts of a message can have different MIME types. A Content message must have at least one Part.
{ # A datatype containing media that is part of a multi-part Content message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. For media types that are not text, `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.
"codeExecutionResult": { # Result of executing the ExecutableCode. Generated only when the `CodeExecution` tool is used. # Optional. The result of executing the ExecutableCode.
"outcome": "A String", # Required. Outcome of the code execution.
"output": "A String", # Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
},
"executableCode": { # Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the `CodeExecution` tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated. # Optional. Code generated by the model that is intended to be executed.
"code": "A String", # Required. The code to be executed.
"language": "A String", # Required. Programming language of the `code`.
},
"fileData": { # URI-based data. A FileData message contains a URI pointing to data of a specific media type. It is used to represent images, audio, and video stored in Google Cloud Storage. # Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage.
"displayName": "A String", # Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"fileUri": "A String", # Required. The URI of the file in Google Cloud Storage.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"functionCall": { # A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name and a structured JSON object containing the parameters and their values. # Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function.
"args": { # Optional. The function parameters and values in JSON object format. See FunctionDeclaration.parameters for parameter details.
"a_key": "", # Properties of the object.
},
"name": "A String", # Optional. The name of the function to call. Matches FunctionDeclaration.name.
"partialArgs": [ # Optional. The partial argument value of the function call. If provided, represents the arguments/fields that are streamed incrementally.
{ # Partial argument value of the function call.
"boolValue": True or False, # Optional. Represents a boolean value.
"jsonPath": "A String", # Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".
"nullValue": "A String", # Optional. Represents a null value.
"numberValue": 3.14, # Optional. Represents a double value.
"stringValue": "A String", # Optional. Represents a string value.
"willContinue": True or False, # Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.
},
],
"willContinue": True or False, # Optional. Whether this is the last part of the FunctionCall. If true, another partial message for the current FunctionCall is expected to follow.
},
"functionResponse": { # The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a `FunctionCall` made based on model prediction. # Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted.
"name": "A String", # Required. The name of the function to call. Matches FunctionDeclaration.name and FunctionCall.name.
"parts": [ # Optional. Ordered `Parts` that constitute a function response. Parts may have different IANA MIME types.
{ # A datatype containing media that is part of a `FunctionResponse` message. A `FunctionResponsePart` consists of data which has an associated datatype. A `FunctionResponsePart` can only contain one of the accepted types in `FunctionResponsePart.data`. A `FunctionResponsePart` must have a fixed IANA MIME type identifying the type and subtype of the media if the `inline_data` field is filled with raw bytes.
"fileData": { # URI based data for function response. # URI based data.
"displayName": "A String", # Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"fileUri": "A String", # Required. URI.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"inlineData": { # Raw media bytes for function response. Text should not be sent as raw bytes, use the 'text' field. # Inline media bytes.
"data": "A String", # Required. Raw bytes.
"displayName": "A String", # Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
},
],
"response": { # Required. The function response in JSON object format. Use "output" key to specify function output and "error" key to specify error details (if any). If "output" and "error" keys are not specified, then whole "response" is treated as function output.
"a_key": "", # Properties of the object.
},
"scheduling": "A String", # Optional. Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE.
},
"inlineData": { # A content blob. A Blob contains data of a specific media type. It is used to represent images, audio, and video. # Optional. The inline data content of the part. This can be used to include images, audio, or video in a request.
"data": "A String", # Required. The raw bytes of the data.
"displayName": "A String", # Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled.
"mimeType": "A String", # Required. The IANA standard MIME type of the source data.
},
"mediaResolution": { # per part media resolution. Media resolution for the input media. # per part media resolution. Media resolution for the input media.
"level": "A String", # The tokenization quality used for given media.
},
"text": "A String", # Optional. The text content of the part. When sent from the VSCode Gemini Code Assist extension, references to @mentioned items will be converted to markdown boldface text. For example `@my-repo` will be converted to and sent as `**my-repo**` by the IDE agent.
"thought": True or False, # Optional. Indicates whether the `part` represents the model's thought process or reasoning.
"thoughtSignature": "A String", # Optional. An opaque signature for the thought so it can be reused in subsequent requests.
"videoMetadata": { # Provides metadata for a video, including the start and end offsets for clipping and the frame rate. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.
"endOffset": "A String", # Optional. The end offset of the video.
"fps": 3.14, # Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0].
"startOffset": "A String", # Optional. The start offset of the video.
},
},
],
"role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'.
},
],
"generationConfig": { # Configuration for content generation. This message contains all the parameters that control how the model generates content. It allows you to influence the randomness, length, and structure of the output. # Optional. Generation config.
"audioTimestamp": True or False, # Optional. If enabled, audio timestamps will be included in the request to the model. This can be useful for synchronizing audio with other modalities in the response.
"candidateCount": 42, # Optional. The number of candidate responses to generate. A higher `candidate_count` can provide more options to choose from, but it also consumes more resources. This can be useful for generating a variety of responses and selecting the best one.
"enableAffectiveDialog": True or False, # Optional. If enabled, the model will detect emotions and adapt its responses accordingly. For example, if the model detects that the user is frustrated, it may provide a more empathetic response.
"frequencyPenalty": 3.14, # Optional. Penalizes tokens based on their frequency in the generated text. A positive value helps to reduce the repetition of words and phrases. Valid values can range from [-2.0, 2.0].
"imageConfig": { # Configuration for image generation. This message allows you to control various aspects of image generation, such as the output format, aspect ratio, and whether the model can generate images of people. # Optional. Config for image generation features.
"aspectRatio": "A String", # Optional. The desired aspect ratio for the generated images. The following aspect ratios are supported: "1:1" "2:3", "3:2" "3:4", "4:3" "4:5", "5:4" "9:16", "16:9" "21:9"
"imageOutputOptions": { # The image output format for generated images. # Optional. The image output format for generated images.
"compressionQuality": 42, # Optional. The compression quality of the output image.
"mimeType": "A String", # Optional. The image format that the output should be saved as.
},
"imageSize": "A String", # Optional. Specifies the size of generated images. Supported values are `1K`, `2K`, `4K`. If not specified, the model will use default value `1K`.
"personGeneration": "A String", # Optional. Controls whether the model can generate people.
"prominentPeople": "A String", # Optional. Controls whether prominent people (celebrities) generation is allowed. If used with personGeneration, personGeneration enum would take precedence. For instance, if ALLOW_NONE is set, all person generation would be blocked. If this field is unspecified, the default behavior is to allow prominent people.
},
"logprobs": 42, # Optional. The number of top log probabilities to return for each token. This can be used to see which other tokens were considered likely candidates for a given position. A higher value will return more options, but it will also increase the size of the response.
"maxOutputTokens": 42, # Optional. The maximum number of tokens to generate in the response. A token is approximately four characters. The default value varies by model. This parameter can be used to control the length of the generated text and prevent overly long responses.
"mediaResolution": "A String", # Optional. The token resolution at which input media content is sampled. This is used to control the trade-off between the quality of the response and the number of tokens used to represent the media. A higher resolution allows the model to perceive more detail, which can lead to a more nuanced response, but it will also use more tokens. This does not affect the image dimensions sent to the model.
"presencePenalty": 3.14, # Optional. Penalizes tokens that have already appeared in the generated text. A positive value encourages the model to generate more diverse and less repetitive text. Valid values can range from [-2.0, 2.0].
"responseJsonSchema": "", # Optional. When this field is set, response_schema must be omitted and response_mime_type must be set to `application/json`.
"responseLogprobs": True or False, # Optional. If set to true, the log probabilities of the output tokens are returned. Log probabilities are the logarithm of the probability of a token appearing in the output. A higher log probability means the token is more likely to be generated. This can be useful for analyzing the model's confidence in its own output and for debugging.
"responseMimeType": "A String", # Optional. The IANA standard MIME type of the response. The model will generate output that conforms to this MIME type. Supported values include 'text/plain' (default) and 'application/json'. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined.
"responseModalities": [ # Optional. The modalities of the response. The model will generate a response that includes all the specified modalities. For example, if this is set to `[TEXT, IMAGE]`, the response will include both text and an image.
"A String",
],
"responseSchema": { # Defines the schema of input and output data. This is a subset of the [OpenAPI 3.0 Schema Object](https://spec.openapis.org/oas/v3.0.3#schema-object). # Optional. Lets you to specify a schema for the model's response, ensuring that the output conforms to a particular structure. This is useful for generating structured data such as JSON. The schema is a subset of the [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema) object. When this field is set, you must also set the `response_mime_type` to `application/json`.
"additionalProperties": "", # Optional. If `type` is `OBJECT`, specifies how to handle properties not defined in `properties`. If it is a boolean `false`, no additional properties are allowed. If it is a schema, additional properties are allowed if they conform to the schema.
"anyOf": [ # Optional. The instance must be valid against any (one or more) of the subschemas listed in `any_of`.
# Object with schema name: GoogleCloudAiplatformV1Schema
],
"default": "", # Optional. Default value to use if the field is not specified.
"defs": { # Optional. `defs` provides a map of schema definitions that can be reused by `ref` elsewhere in the schema. Only allowed at root level of the schema.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"description": "A String", # Optional. Describes the data. The model uses this field to understand the purpose of the schema and how to use it. It is a best practice to provide a clear and descriptive explanation for the schema and its properties here, rather than in the prompt.
"enum": [ # Optional. Possible values of the field. This field can be used to restrict a value to a fixed set of values. To mark a field as an enum, set `format` to `enum` and provide the list of possible values in `enum`. For example: 1. To define directions: `{type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]}` 2. To define apartment numbers: `{type:INTEGER, format:enum, enum:["101", "201", "301"]}`
"A String",
],
"example": "", # Optional. Example of an instance of this schema.
"format": "A String", # Optional. The format of the data. For `NUMBER` type, format can be `float` or `double`. For `INTEGER` type, format can be `int32` or `int64`. For `STRING` type, format can be `email`, `byte`, `date`, `date-time`, `password`, and other formats to further refine the data type.
"items": # Object with schema name: GoogleCloudAiplatformV1Schema # Optional. If type is `ARRAY`, `items` specifies the schema of elements in the array.
"maxItems": "A String", # Optional. If type is `ARRAY`, `max_items` specifies the maximum number of items in an array.
"maxLength": "A String", # Optional. If type is `STRING`, `max_length` specifies the maximum length of the string.
"maxProperties": "A String", # Optional. If type is `OBJECT`, `max_properties` specifies the maximum number of properties that can be provided.
"maximum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `maximum` specifies the maximum allowed value.
"minItems": "A String", # Optional. If type is `ARRAY`, `min_items` specifies the minimum number of items in an array.
"minLength": "A String", # Optional. If type is `STRING`, `min_length` specifies the minimum length of the string.
"minProperties": "A String", # Optional. If type is `OBJECT`, `min_properties` specifies the minimum number of properties that can be provided.
"minimum": 3.14, # Optional. If type is `INTEGER` or `NUMBER`, `minimum` specifies the minimum allowed value.
"nullable": True or False, # Optional. Indicates if the value of this field can be null.
"pattern": "A String", # Optional. If type is `STRING`, `pattern` specifies a regular expression that the string must match.
"properties": { # Optional. If type is `OBJECT`, `properties` is a map of property names to schema definitions for each property of the object.
"a_key": # Object with schema name: GoogleCloudAiplatformV1Schema
},
"propertyOrdering": [ # Optional. Order of properties displayed or used where order matters. This is not a standard field in OpenAPI specification, but can be used to control the order of properties.
"A String",
],
"ref": "A String", # Optional. Allows referencing another schema definition to use in place of this schema. The value must be a valid reference to a schema in `defs`. For example, the following schema defines a reference to a schema node named "Pet": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the "pet" property is a reference to the schema node named "Pet". See details in https://json-schema.org/understanding-json-schema/structuring
"required": [ # Optional. If type is `OBJECT`, `required` lists the names of properties that must be present.
"A String",
],
"title": "A String", # Optional. Title for the schema.
"type": "A String", # Optional. Data type of the schema field.
},
"routingConfig": { # The configuration for routing the request to a specific model. This can be used to control which model is used for the generation, either automatically or by specifying a model name. # Optional. Routing configuration.
"autoMode": { # The configuration for automated routing. When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference. # In this mode, the model is selected automatically based on the content of the request.
"modelRoutingPreference": "A String", # The model routing preference.
},
"manualMode": { # The configuration for manual routing. When manual routing is specified, the model will be selected based on the model name provided. # In this mode, the model is specified manually.
"modelName": "A String", # The name of the model to use. Only public LLM models are accepted.
},
},
"seed": 42, # Optional. A seed for the random number generator. By setting a seed, you can make the model's output mostly deterministic. For a given prompt and parameters (like temperature, top_p, etc.), the model will produce the same response every time. However, it's not a guaranteed absolute deterministic behavior. This is different from parameters like `temperature`, which control the *level* of randomness. `seed` ensures that the "random" choices the model makes are the same on every run, making it essential for testing and ensuring reproducible results.
"speechConfig": { # Configuration for speech generation. # Optional. The speech generation config.
"languageCode": "A String", # Optional. The language code (ISO 639-1) for the speech synthesis.
"multiSpeakerVoiceConfig": { # Configuration for a multi-speaker text-to-speech request. # The configuration for a multi-speaker text-to-speech request. This field is mutually exclusive with `voice_config`.
"speakerVoiceConfigs": [ # Required. A list of configurations for the voices of the speakers. Exactly two speaker voice configurations must be provided.
{ # Configuration for a single speaker in a multi-speaker setup.
"speaker": "A String", # Required. The name of the speaker. This should be the same as the speaker name used in the prompt.
"voiceConfig": { # Configuration for a voice. # Required. The configuration for the voice of this speaker.
"prebuiltVoiceConfig": { # Configuration for a prebuilt voice. # The configuration for a prebuilt voice.
"voiceName": "A String", # The name of the prebuilt voice to use.
},
"replicatedVoiceConfig": { # The configuration for the replicated voice to use. # Optional. The configuration for a replicated voice. This enables users to replicate a voice from an audio sample.
"mimeType": "A String", # Optional. The mimetype of the voice sample. The only currently supported value is `audio/wav`. This represents 16-bit signed little-endian wav data, with a 24kHz sampling rate. `mime_type` will default to `audio/wav` if not set.
"voiceSampleAudio": "A String", # Optional. The sample of the custom voice.
},
},
},
],
},
"voiceConfig": { # Configuration for a voice. # The configuration for the voice to use.
"prebuiltVoiceConfig": { # Configuration for a prebuilt voice. # The configuration for a prebuilt voice.
"voiceName": "A String", # The name of the prebuilt voice to use.
},
"replicatedVoiceConfig": { # The configuration for the replicated voice to use. # Optional. The configuration for a replicated voice. This enables users to replicate a voice from an audio sample.
"mimeType": "A String", # Optional. The mimetype of the voice sample. The only currently supported value is `audio/wav`. This represents 16-bit signed little-endian wav data, with a 24kHz sampling rate. `mime_type` will default to `audio/wav` if not set.
"voiceSampleAudio": "A String", # Optional. The sample of the custom voice.
},
},
},
"stopSequences": [ # Optional. A list of character sequences that will stop the model from generating further tokens. If a stop sequence is generated, the output will end at that point. This is useful for controlling the length and structure of the output. For example, you can use ["\n", "###"] to stop generation at a new line or a specific marker.
"A String",
],
"temperature": 3.14, # Optional. Controls the randomness of the output. A higher temperature results in more creative and diverse responses, while a lower temperature makes the output more predictable and focused. The valid range is (0.0, 2.0].
"thinkingConfig": { # Configuration for the model's thinking features. "Thinking" is a process where the model breaks down a complex task into smaller, manageable steps. This allows the model to reason about the task, plan its approach, and execute the plan to generate a high-quality response. # Optional. Configuration for thinking features. An error will be returned if this field is set for models that don't support thinking.
"includeThoughts": True or False, # Optional. If true, the model will include its thoughts in the response. "Thoughts" are the intermediate steps the model takes to arrive at the final response. They can provide insights into the model's reasoning process and help with debugging. If this is true, thoughts are returned only when available.
"thinkingBudget": 42, # Optional. The token budget for the model's thinking process. The model will make a best effort to stay within this budget. This can be used to control the trade-off between response quality and latency.
"thinkingLevel": "A String", # Optional. The number of thoughts tokens that the model should generate.
},
"topK": 3.14, # Optional. Specifies the top-k sampling threshold. The model considers only the top k most probable tokens for the next token. This can be useful for generating more coherent and less random text. For example, a `top_k` of 40 means the model will choose the next word from the 40 most likely words.
"topP": 3.14, # Optional. Specifies the nucleus sampling threshold. The model considers only the smallest set of tokens whose cumulative probability is at least `top_p`. This helps generate more diverse and less repetitive responses. For example, a `top_p` of 0.9 means the model considers tokens until the cumulative probability of the tokens to select from reaches 0.9. It's recommended to adjust either temperature or `top_p`, but not both.
},
"labels": { # Optional. The labels with user-defined metadata for the request. It is used for billing and reporting only. Label keys and values can be no longer than 63 characters (Unicode codepoints) and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter.
"a_key": "A String",
},
"modelArmorConfig": { # Configuration for Model Armor. Model Armor is a Google Cloud service that provides safety and security filtering for prompts and responses. It helps protect your AI applications from risks such as harmful content, sensitive data leakage, and prompt injection attacks. # Optional. Settings for prompt and response sanitization using the Model Armor service. If supplied, safety_settings must not be supplied.
"promptTemplateName": "A String", # Optional. The resource name of the Model Armor template to use for prompt screening. A Model Armor template is a set of customized filters and thresholds that define how Model Armor screens content. If specified, Model Armor will use this template to check the user's prompt for safety and security risks before it is sent to the model. The name must be in the format `projects/{project}/locations/{location}/templates/{template}`.
"responseTemplateName": "A String", # Optional. The resource name of the Model Armor template to use for response screening. A Model Armor template is a set of customized filters and thresholds that define how Model Armor screens content. If specified, Model Armor will use this template to check the model's response for safety and security risks before it is returned to the user. The name must be in the format `projects/{project}/locations/{location}/templates/{template}`.
},
"safetySettings": [ # Optional. Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.
{ # A safety setting that affects the safety-blocking behavior. A SafetySetting consists of a harm category and a threshold for that category.
"category": "A String", # Required. The harm category to be blocked.
"method": "A String", # Optional. The method for blocking content. If not specified, the default behavior is to use the probability score.
"threshold": "A String", # Required. The threshold for blocking content. If the harm probability exceeds this threshold, the content will be blocked.