diff --git a/google/genai/_live_converters.py b/google/genai/_live_converters.py index 3b85070ee..e1a0aaa2e 100644 --- a/google/genai/_live_converters.py +++ b/google/genai/_live_converters.py @@ -23,6 +23,40 @@ from ._common import set_value_by_path as setv +def _AuthConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['api_key']) is not None: + setv(to_object, ['apiKey'], getv(from_object, ['api_key'])) + + if getv(from_object, ['api_key_config']) is not None: + raise ValueError('api_key_config parameter is not supported in Gemini API.') + + if getv(from_object, ['auth_type']) is not None: + raise ValueError('auth_type parameter is not supported in Gemini API.') + + if getv(from_object, ['google_service_account_config']) is not None: + raise ValueError( + 'google_service_account_config parameter is not supported in Gemini' + ' API.' + ) + + if getv(from_object, ['http_basic_auth_config']) is not None: + raise ValueError( + 'http_basic_auth_config parameter is not supported in Gemini API.' + ) + + if getv(from_object, ['oauth_config']) is not None: + raise ValueError('oauth_config parameter is not supported in Gemini API.') + + if getv(from_object, ['oidc_config']) is not None: + raise ValueError('oidc_config parameter is not supported in Gemini API.') + + return to_object + + def _Blob_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -61,6 +95,27 @@ def _Content_to_mldev( return to_object +def _Content_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_to_vertex(item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + def _FileData_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -226,13 +281,7 @@ def _GenerationConfig_to_vertex( setv(to_object, ['seed'], getv(from_object, ['seed'])) if getv(from_object, ['speech_config']) is not None: - setv( - to_object, - ['speechConfig'], - _SpeechConfig_to_vertex( - getv(from_object, ['speech_config']), to_object - ), - ) + setv(to_object, ['speechConfig'], getv(from_object, ['speech_config'])) if getv(from_object, ['stop_sequences']) is not None: setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences'])) @@ -263,7 +312,11 @@ def _GoogleMaps_to_mldev( ) -> dict[str, Any]: to_object: dict[str, Any] = {} if getv(from_object, ['auth_config']) is not None: - raise ValueError('auth_config parameter is not supported in Gemini API.') + setv( + to_object, + ['authConfig'], + _AuthConfig_to_mldev(getv(from_object, ['auth_config']), to_object), + ) if getv(from_object, ['enable_widget']) is not None: setv(to_object, ['enableWidget'], getv(from_object, ['enable_widget'])) @@ -276,14 +329,14 @@ def _GoogleSearch_to_mldev( parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['exclude_domains']) is not None: + if getv(from_object, ['blocking_confidence']) is not None: raise ValueError( - 'exclude_domains parameter is not supported in Gemini API.' 
+ 'blocking_confidence parameter is not supported in Gemini API.' ) - if getv(from_object, ['blocking_confidence']) is not None: + if getv(from_object, ['exclude_domains']) is not None: raise ValueError( - 'blocking_confidence parameter is not supported in Gemini API.' + 'exclude_domains parameter is not supported in Gemini API.' ) if getv(from_object, ['time_range_filter']) is not None: @@ -315,6 +368,27 @@ def _LiveClientContent_to_mldev( return to_object +def _LiveClientContent_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['turns']) is not None: + setv( + to_object, + ['turns'], + [ + _Content_to_vertex(item, to_object) + for item in getv(from_object, ['turns']) + ], + ) + + if getv(from_object, ['turn_complete']) is not None: + setv(to_object, ['turnComplete'], getv(from_object, ['turn_complete'])) + + return to_object + + def _LiveClientMessage_to_mldev( api_client: BaseApiClient, from_object: Union[dict[str, Any], object], @@ -370,7 +444,13 @@ def _LiveClientMessage_to_vertex( ) if getv(from_object, ['client_content']) is not None: - setv(to_object, ['clientContent'], getv(from_object, ['client_content'])) + setv( + to_object, + ['clientContent'], + _LiveClientContent_to_vertex( + getv(from_object, ['client_content']), to_object + ), + ) if getv(from_object, ['realtime_input']) is not None: setv( @@ -559,7 +639,9 @@ def _LiveClientSetup_to_vertex( setv( to_object, ['systemInstruction'], - t.t_content(getv(from_object, ['system_instruction'])), + _Content_to_vertex( + t.t_content(getv(from_object, ['system_instruction'])), to_object + ), ) if getv(from_object, ['tools']) is not None: @@ -673,7 +755,10 @@ def _LiveConnectConfig_to_mldev( setv( parent_object, ['setup', 'generationConfig', 'speechConfig'], - t.t_live_speech_config(getv(from_object, ['speech_config'])), + _SpeechConfig_to_mldev( + t.t_live_speech_config(getv(from_object, ['speech_config'])), + to_object, + ), ) if getv(from_object, ['thinking_config']) is not None: @@ -825,10 +910,7 @@ def _LiveConnectConfig_to_vertex( setv( parent_object, ['setup', 'generationConfig', 'speechConfig'], - _SpeechConfig_to_vertex( - t.t_live_speech_config(getv(from_object, ['speech_config'])), - to_object, - ), + t.t_live_speech_config(getv(from_object, ['speech_config'])), ) if getv(from_object, ['thinking_config']) is not None: @@ -849,7 +931,9 @@ def _LiveConnectConfig_to_vertex( setv( parent_object, ['setup', 'systemInstruction'], - t.t_content(getv(from_object, ['system_instruction'])), + _Content_to_vertex( + t.t_content(getv(from_object, ['system_instruction'])), to_object + ), ) if getv(from_object, ['tools']) is not None: @@ -1179,6 +1263,24 @@ def _LiveServerMessage_from_vertex( return to_object +def _MultiSpeakerVoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['speaker_voice_configs']) is not None: + setv( + to_object, + ['speakerVoiceConfigs'], + [ + _SpeakerVoiceConfig_to_mldev(item, to_object) + for item in getv(from_object, ['speaker_voice_configs']) + ], + ) + + return to_object + + def _Part_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -1243,6 +1345,67 @@ def _Part_to_mldev( if getv(from_object, ['video_metadata']) is not None: setv(to_object, ['videoMetadata'], getv(from_object, 
['video_metadata'])) + if getv(from_object, ['part_metadata']) is not None: + setv(to_object, ['partMetadata'], getv(from_object, ['part_metadata'])) + + return to_object + + +def _Part_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['media_resolution']) is not None: + setv( + to_object, ['mediaResolution'], getv(from_object, ['media_resolution']) + ) + + if getv(from_object, ['code_execution_result']) is not None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['thought_signature']) is not None: + setv( + to_object, + ['thoughtSignature'], + getv(from_object, ['thought_signature']), + ) + + if getv(from_object, ['video_metadata']) is not None: + setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + + if getv(from_object, ['part_metadata']) is not None: + raise ValueError('part_metadata parameter is not supported in Vertex AI.') + return to_object @@ -1260,20 +1423,46 @@ def _SessionResumptionConfig_to_mldev( return to_object -def _SpeechConfig_to_vertex( +def _SpeakerVoiceConfig_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['language_code']) is not None: - setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) + if getv(from_object, ['speaker']) is not None: + setv(to_object, ['speaker'], getv(from_object, ['speaker'])) if getv(from_object, ['voice_config']) is not None: - setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config'])) + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), + ) + + return to_object + + +def _SpeechConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['language_code']) is not None: + setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) if getv(from_object, ['multi_speaker_voice_config']) is not None: - raise ValueError( - 'multi_speaker_voice_config parameter is not supported in Vertex AI.' 
+ setv( + to_object, + ['multiSpeakerVoiceConfig'], + _MultiSpeakerVoiceConfig_to_mldev( + getv(from_object, ['multi_speaker_voice_config']), to_object + ), + ) + + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), ) return to_object @@ -1307,6 +1496,13 @@ def _Tool_to_mldev( if getv(from_object, ['file_search']) is not None: setv(to_object, ['fileSearch'], getv(from_object, ['file_search'])) + if getv(from_object, ['google_maps']) is not None: + setv( + to_object, + ['googleMaps'], + _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), + ) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -1315,13 +1511,6 @@ def _Tool_to_mldev( 'enterprise_web_search parameter is not supported in Gemini API.' ) - if getv(from_object, ['google_maps']) is not None: - setv( - to_object, - ['googleMaps'], - _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), - ) - if getv(from_object, ['google_search']) is not None: setv( to_object, @@ -1366,6 +1555,9 @@ def _Tool_to_vertex( if getv(from_object, ['file_search']) is not None: raise ValueError('file_search parameter is not supported in Vertex AI.') + if getv(from_object, ['google_maps']) is not None: + setv(to_object, ['googleMaps'], getv(from_object, ['google_maps'])) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -1376,9 +1568,6 @@ def _Tool_to_vertex( getv(from_object, ['enterprise_web_search']), ) - if getv(from_object, ['google_maps']) is not None: - setv(to_object, ['googleMaps'], getv(from_object, ['google_maps'])) - if getv(from_object, ['google_search']) is not None: setv(to_object, ['googleSearch'], getv(from_object, ['google_search'])) @@ -1465,3 +1654,23 @@ def _UsageMetadata_from_vertex( setv(to_object, ['traffic_type'], getv(from_object, ['trafficType'])) return to_object + + +def _VoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['prebuilt_voice_config']) is not None: + setv( + to_object, + ['prebuiltVoiceConfig'], + getv(from_object, ['prebuilt_voice_config']), + ) + + if getv(from_object, ['replicated_voice_config']) is not None: + raise ValueError( + 'replicated_voice_config parameter is not supported in Gemini API.' 
+ ) + + return to_object diff --git a/google/genai/_tokens_converters.py b/google/genai/_tokens_converters.py index a6e1cd46d..209df9eba 100644 --- a/google/genai/_tokens_converters.py +++ b/google/genai/_tokens_converters.py @@ -23,6 +23,40 @@ from ._common import set_value_by_path as setv +def _AuthConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['api_key']) is not None: + setv(to_object, ['apiKey'], getv(from_object, ['api_key'])) + + if getv(from_object, ['api_key_config']) is not None: + raise ValueError('api_key_config parameter is not supported in Gemini API.') + + if getv(from_object, ['auth_type']) is not None: + raise ValueError('auth_type parameter is not supported in Gemini API.') + + if getv(from_object, ['google_service_account_config']) is not None: + raise ValueError( + 'google_service_account_config parameter is not supported in Gemini' + ' API.' + ) + + if getv(from_object, ['http_basic_auth_config']) is not None: + raise ValueError( + 'http_basic_auth_config parameter is not supported in Gemini API.' + ) + + if getv(from_object, ['oauth_config']) is not None: + raise ValueError('oauth_config parameter is not supported in Gemini API.') + + if getv(from_object, ['oidc_config']) is not None: + raise ValueError('oidc_config parameter is not supported in Gemini API.') + + return to_object + + def _Blob_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -177,7 +211,11 @@ def _GoogleMaps_to_mldev( ) -> dict[str, Any]: to_object: dict[str, Any] = {} if getv(from_object, ['auth_config']) is not None: - raise ValueError('auth_config parameter is not supported in Gemini API.') + setv( + to_object, + ['authConfig'], + _AuthConfig_to_mldev(getv(from_object, ['auth_config']), to_object), + ) if getv(from_object, ['enable_widget']) is not None: setv(to_object, ['enableWidget'], getv(from_object, ['enable_widget'])) @@ -190,14 +228,14 @@ def _GoogleSearch_to_mldev( parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['exclude_domains']) is not None: + if getv(from_object, ['blocking_confidence']) is not None: raise ValueError( - 'exclude_domains parameter is not supported in Gemini API.' + 'blocking_confidence parameter is not supported in Gemini API.' ) - if getv(from_object, ['blocking_confidence']) is not None: + if getv(from_object, ['exclude_domains']) is not None: raise ValueError( - 'blocking_confidence parameter is not supported in Gemini API.' + 'exclude_domains parameter is not supported in Gemini API.' 
) if getv(from_object, ['time_range_filter']) is not None: @@ -275,7 +313,10 @@ def _LiveConnectConfig_to_mldev( setv( parent_object, ['setup', 'generationConfig', 'speechConfig'], - t.t_live_speech_config(getv(from_object, ['speech_config'])), + _SpeechConfig_to_mldev( + t.t_live_speech_config(getv(from_object, ['speech_config'])), + to_object, + ), ) if getv(from_object, ['thinking_config']) is not None: @@ -383,6 +424,24 @@ def _LiveConnectConstraints_to_mldev( return to_object +def _MultiSpeakerVoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['speaker_voice_configs']) is not None: + setv( + to_object, + ['speakerVoiceConfigs'], + [ + _SpeakerVoiceConfig_to_mldev(item, to_object) + for item in getv(from_object, ['speaker_voice_configs']) + ], + ) + + return to_object + + def _Part_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -447,6 +506,9 @@ def _Part_to_mldev( if getv(from_object, ['video_metadata']) is not None: setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + if getv(from_object, ['part_metadata']) is not None: + setv(to_object, ['partMetadata'], getv(from_object, ['part_metadata'])) + return to_object @@ -464,6 +526,51 @@ def _SessionResumptionConfig_to_mldev( return to_object +def _SpeakerVoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['speaker']) is not None: + setv(to_object, ['speaker'], getv(from_object, ['speaker'])) + + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), + ) + + return to_object + + +def _SpeechConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['language_code']) is not None: + setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) + + if getv(from_object, ['multi_speaker_voice_config']) is not None: + setv( + to_object, + ['multiSpeakerVoiceConfig'], + _MultiSpeakerVoiceConfig_to_mldev( + getv(from_object, ['multi_speaker_voice_config']), to_object + ), + ) + + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), + ) + + return to_object + + def _Tool_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -492,6 +599,13 @@ def _Tool_to_mldev( if getv(from_object, ['file_search']) is not None: setv(to_object, ['fileSearch'], getv(from_object, ['file_search'])) + if getv(from_object, ['google_maps']) is not None: + setv( + to_object, + ['googleMaps'], + _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), + ) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -500,13 +614,6 @@ def _Tool_to_mldev( 'enterprise_web_search parameter is not supported in Gemini API.' 
) - if getv(from_object, ['google_maps']) is not None: - setv( - to_object, - ['googleMaps'], - _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), - ) - if getv(from_object, ['google_search']) is not None: setv( to_object, @@ -518,3 +625,23 @@ def _Tool_to_mldev( setv(to_object, ['urlContext'], getv(from_object, ['url_context'])) return to_object + + +def _VoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['prebuilt_voice_config']) is not None: + setv( + to_object, + ['prebuiltVoiceConfig'], + getv(from_object, ['prebuilt_voice_config']), + ) + + if getv(from_object, ['replicated_voice_config']) is not None: + raise ValueError( + 'replicated_voice_config parameter is not supported in Gemini API.' + ) + + return to_object diff --git a/google/genai/batches.py b/google/genai/batches.py index c97af2210..77fdca6dc 100644 --- a/google/genai/batches.py +++ b/google/genai/batches.py @@ -35,6 +35,40 @@ logger = logging.getLogger('google_genai.batches') +def _AuthConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['api_key']) is not None: + setv(to_object, ['apiKey'], getv(from_object, ['api_key'])) + + if getv(from_object, ['api_key_config']) is not None: + raise ValueError('api_key_config parameter is not supported in Gemini API.') + + if getv(from_object, ['auth_type']) is not None: + raise ValueError('auth_type parameter is not supported in Gemini API.') + + if getv(from_object, ['google_service_account_config']) is not None: + raise ValueError( + 'google_service_account_config parameter is not supported in Gemini' + ' API.' + ) + + if getv(from_object, ['http_basic_auth_config']) is not None: + raise ValueError( + 'http_basic_auth_config parameter is not supported in Gemini API.' 
+ ) + + if getv(from_object, ['oauth_config']) is not None: + raise ValueError('oauth_config parameter is not supported in Gemini API.') + + if getv(from_object, ['oidc_config']) is not None: + raise ValueError('oidc_config parameter is not supported in Gemini API.') + + return to_object + + def _BatchJobDestination_from_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -408,9 +442,6 @@ def _Candidate_from_mldev( if getv(from_object, ['finishReason']) is not None: setv(to_object, ['finish_reason'], getv(from_object, ['finishReason'])) - if getv(from_object, ['avgLogprobs']) is not None: - setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs'])) - if getv(from_object, ['groundingMetadata']) is not None: setv( to_object, @@ -418,6 +449,9 @@ def _Candidate_from_mldev( getv(from_object, ['groundingMetadata']), ) + if getv(from_object, ['avgLogprobs']) is not None: + setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs'])) + if getv(from_object, ['index']) is not None: setv(to_object, ['index'], getv(from_object, ['index'])) @@ -988,7 +1022,9 @@ def _GenerateContentConfig_to_mldev( setv( to_object, ['speechConfig'], - t.t_speech_config(getv(from_object, ['speech_config'])), + _SpeechConfig_to_mldev( + t.t_speech_config(getv(from_object, ['speech_config'])), to_object + ), ) if getv(from_object, ['audio_timestamp']) is not None: @@ -1082,7 +1118,11 @@ def _GoogleMaps_to_mldev( ) -> dict[str, Any]: to_object: dict[str, Any] = {} if getv(from_object, ['auth_config']) is not None: - raise ValueError('auth_config parameter is not supported in Gemini API.') + setv( + to_object, + ['authConfig'], + _AuthConfig_to_mldev(getv(from_object, ['auth_config']), to_object), + ) if getv(from_object, ['enable_widget']) is not None: setv(to_object, ['enableWidget'], getv(from_object, ['enable_widget'])) @@ -1095,14 +1135,14 @@ def _GoogleSearch_to_mldev( parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['exclude_domains']) is not None: + if getv(from_object, ['blocking_confidence']) is not None: raise ValueError( - 'exclude_domains parameter is not supported in Gemini API.' + 'blocking_confidence parameter is not supported in Gemini API.' ) - if getv(from_object, ['blocking_confidence']) is not None: + if getv(from_object, ['exclude_domains']) is not None: raise ValueError( - 'blocking_confidence parameter is not supported in Gemini API.' + 'exclude_domains parameter is not supported in Gemini API.' ) if getv(from_object, ['time_range_filter']) is not None: @@ -1134,6 +1174,16 @@ def _ImageConfig_to_mldev( 'output_compression_quality parameter is not supported in Gemini API.' ) + if getv(from_object, ['image_output_options']) is not None: + raise ValueError( + 'image_output_options parameter is not supported in Gemini API.' + ) + + if getv(from_object, ['person_generation']) is not None: + raise ValueError( + 'person_generation parameter is not supported in Gemini API.' 
+ ) + return to_object @@ -1319,6 +1369,24 @@ def _ListBatchJobsResponse_from_vertex( return to_object +def _MultiSpeakerVoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['speaker_voice_configs']) is not None: + setv( + to_object, + ['speakerVoiceConfigs'], + [ + _SpeakerVoiceConfig_to_mldev(item, to_object) + for item in getv(from_object, ['speaker_voice_configs']) + ], + ) + + return to_object + + def _Part_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -1383,6 +1451,9 @@ def _Part_to_mldev( if getv(from_object, ['video_metadata']) is not None: setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + if getv(from_object, ['part_metadata']) is not None: + setv(to_object, ['partMetadata'], getv(from_object, ['part_metadata'])) + return to_object @@ -1403,6 +1474,51 @@ def _SafetySetting_to_mldev( return to_object +def _SpeakerVoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['speaker']) is not None: + setv(to_object, ['speaker'], getv(from_object, ['speaker'])) + + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), + ) + + return to_object + + +def _SpeechConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['language_code']) is not None: + setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) + + if getv(from_object, ['multi_speaker_voice_config']) is not None: + setv( + to_object, + ['multiSpeakerVoiceConfig'], + _MultiSpeakerVoiceConfig_to_mldev( + getv(from_object, ['multi_speaker_voice_config']), to_object + ), + ) + + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), + ) + + return to_object + + def _ToolConfig_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -1453,6 +1569,13 @@ def _Tool_to_mldev( if getv(from_object, ['file_search']) is not None: setv(to_object, ['fileSearch'], getv(from_object, ['file_search'])) + if getv(from_object, ['google_maps']) is not None: + setv( + to_object, + ['googleMaps'], + _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), + ) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -1461,13 +1584,6 @@ def _Tool_to_mldev( 'enterprise_web_search parameter is not supported in Gemini API.' 
) - if getv(from_object, ['google_maps']) is not None: - setv( - to_object, - ['googleMaps'], - _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), - ) - if getv(from_object, ['google_search']) is not None: setv( to_object, @@ -1481,6 +1597,26 @@ def _Tool_to_mldev( return to_object +def _VoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['prebuilt_voice_config']) is not None: + setv( + to_object, + ['prebuiltVoiceConfig'], + getv(from_object, ['prebuilt_voice_config']), + ) + + if getv(from_object, ['replicated_voice_config']) is not None: + raise ValueError( + 'replicated_voice_config parameter is not supported in Gemini API.' + ) + + return to_object + + class Batches(_api_module.BaseModule): def _create( diff --git a/google/genai/caches.py b/google/genai/caches.py index e905f7187..ede808466 100644 --- a/google/genai/caches.py +++ b/google/genai/caches.py @@ -33,6 +33,40 @@ logger = logging.getLogger('google_genai.caches') +def _AuthConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['api_key']) is not None: + setv(to_object, ['apiKey'], getv(from_object, ['api_key'])) + + if getv(from_object, ['api_key_config']) is not None: + raise ValueError('api_key_config parameter is not supported in Gemini API.') + + if getv(from_object, ['auth_type']) is not None: + raise ValueError('auth_type parameter is not supported in Gemini API.') + + if getv(from_object, ['google_service_account_config']) is not None: + raise ValueError( + 'google_service_account_config parameter is not supported in Gemini' + ' API.' + ) + + if getv(from_object, ['http_basic_auth_config']) is not None: + raise ValueError( + 'http_basic_auth_config parameter is not supported in Gemini API.' 
+ ) + + if getv(from_object, ['oauth_config']) is not None: + raise ValueError('oauth_config parameter is not supported in Gemini API.') + + if getv(from_object, ['oidc_config']) is not None: + raise ValueError('oidc_config parameter is not supported in Gemini API.') + + return to_object + + def _Blob_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -71,6 +105,27 @@ def _Content_to_mldev( return to_object +def _Content_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_to_vertex(item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + def _CreateCachedContentConfig_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -147,14 +202,19 @@ def _CreateCachedContentConfig_to_vertex( setv( parent_object, ['contents'], - [item for item in t.t_contents(getv(from_object, ['contents']))], + [ + _Content_to_vertex(item, to_object) + for item in t.t_contents(getv(from_object, ['contents'])) + ], ) if getv(from_object, ['system_instruction']) is not None: setv( parent_object, ['systemInstruction'], - t.t_content(getv(from_object, ['system_instruction'])), + _Content_to_vertex( + t.t_content(getv(from_object, ['system_instruction'])), to_object + ), ) if getv(from_object, ['tools']) is not None: @@ -419,7 +479,11 @@ def _GoogleMaps_to_mldev( ) -> dict[str, Any]: to_object: dict[str, Any] = {} if getv(from_object, ['auth_config']) is not None: - raise ValueError('auth_config parameter is not supported in Gemini API.') + setv( + to_object, + ['authConfig'], + _AuthConfig_to_mldev(getv(from_object, ['auth_config']), to_object), + ) if getv(from_object, ['enable_widget']) is not None: setv(to_object, ['enableWidget'], getv(from_object, ['enable_widget'])) @@ -432,14 +496,14 @@ def _GoogleSearch_to_mldev( parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['exclude_domains']) is not None: + if getv(from_object, ['blocking_confidence']) is not None: raise ValueError( - 'exclude_domains parameter is not supported in Gemini API.' + 'blocking_confidence parameter is not supported in Gemini API.' ) - if getv(from_object, ['blocking_confidence']) is not None: + if getv(from_object, ['exclude_domains']) is not None: raise ValueError( - 'blocking_confidence parameter is not supported in Gemini API.' + 'exclude_domains parameter is not supported in Gemini API.' 
) if getv(from_object, ['time_range_filter']) is not None: @@ -626,6 +690,67 @@ def _Part_to_mldev( if getv(from_object, ['video_metadata']) is not None: setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + if getv(from_object, ['part_metadata']) is not None: + setv(to_object, ['partMetadata'], getv(from_object, ['part_metadata'])) + + return to_object + + +def _Part_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['media_resolution']) is not None: + setv( + to_object, ['mediaResolution'], getv(from_object, ['media_resolution']) + ) + + if getv(from_object, ['code_execution_result']) is not None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['thought_signature']) is not None: + setv( + to_object, + ['thoughtSignature'], + getv(from_object, ['thought_signature']), + ) + + if getv(from_object, ['video_metadata']) is not None: + setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + + if getv(from_object, ['part_metadata']) is not None: + raise ValueError('part_metadata parameter is not supported in Vertex AI.') + return to_object @@ -679,6 +804,13 @@ def _Tool_to_mldev( if getv(from_object, ['file_search']) is not None: setv(to_object, ['fileSearch'], getv(from_object, ['file_search'])) + if getv(from_object, ['google_maps']) is not None: + setv( + to_object, + ['googleMaps'], + _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), + ) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -687,13 +819,6 @@ def _Tool_to_mldev( 'enterprise_web_search parameter is not supported in Gemini API.' 
) - if getv(from_object, ['google_maps']) is not None: - setv( - to_object, - ['googleMaps'], - _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), - ) - if getv(from_object, ['google_search']) is not None: setv( to_object, @@ -738,6 +863,9 @@ def _Tool_to_vertex( if getv(from_object, ['file_search']) is not None: raise ValueError('file_search parameter is not supported in Vertex AI.') + if getv(from_object, ['google_maps']) is not None: + setv(to_object, ['googleMaps'], getv(from_object, ['google_maps'])) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -748,9 +876,6 @@ def _Tool_to_vertex( getv(from_object, ['enterprise_web_search']), ) - if getv(from_object, ['google_maps']) is not None: - setv(to_object, ['googleMaps'], getv(from_object, ['google_maps'])) - if getv(from_object, ['google_search']) is not None: setv(to_object, ['googleSearch'], getv(from_object, ['google_search'])) diff --git a/google/genai/models.py b/google/genai/models.py index 9c53499a7..2d9bc7925 100644 --- a/google/genai/models.py +++ b/google/genai/models.py @@ -54,6 +54,40 @@ def _VideoGenerationReferenceType_to_mldev_enum_validate( raise ValueError(f'{enum_value} enum value is not supported in Gemini API.') +def _AuthConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['api_key']) is not None: + setv(to_object, ['apiKey'], getv(from_object, ['api_key'])) + + if getv(from_object, ['api_key_config']) is not None: + raise ValueError('api_key_config parameter is not supported in Gemini API.') + + if getv(from_object, ['auth_type']) is not None: + raise ValueError('auth_type parameter is not supported in Gemini API.') + + if getv(from_object, ['google_service_account_config']) is not None: + raise ValueError( + 'google_service_account_config parameter is not supported in Gemini' + ' API.' + ) + + if getv(from_object, ['http_basic_auth_config']) is not None: + raise ValueError( + 'http_basic_auth_config parameter is not supported in Gemini API.' 
+ ) + + if getv(from_object, ['oauth_config']) is not None: + raise ValueError('oauth_config parameter is not supported in Gemini API.') + + if getv(from_object, ['oidc_config']) is not None: + raise ValueError('oidc_config parameter is not supported in Gemini API.') + + return to_object + + def _Blob_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -94,9 +128,6 @@ def _Candidate_from_mldev( if getv(from_object, ['finishReason']) is not None: setv(to_object, ['finish_reason'], getv(from_object, ['finishReason'])) - if getv(from_object, ['avgLogprobs']) is not None: - setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs'])) - if getv(from_object, ['groundingMetadata']) is not None: setv( to_object, @@ -104,6 +135,9 @@ def _Candidate_from_mldev( getv(from_object, ['groundingMetadata']), ) + if getv(from_object, ['avgLogprobs']) is not None: + setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs'])) + if getv(from_object, ['index']) is not None: setv(to_object, ['index'], getv(from_object, ['index'])) @@ -159,7 +193,10 @@ def _ComputeTokensParameters_to_vertex( setv( to_object, ['contents'], - [item for item in t.t_contents(getv(from_object, ['contents']))], + [ + _Content_to_vertex(item, to_object) + for item in t.t_contents(getv(from_object, ['contents'])) + ], ) return to_object @@ -240,6 +277,27 @@ def _Content_to_mldev( return to_object +def _Content_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['parts']) is not None: + setv( + to_object, + ['parts'], + [ + _Part_to_vertex(item, to_object) + for item in getv(from_object, ['parts']) + ], + ) + + if getv(from_object, ['role']) is not None: + setv(to_object, ['role'], getv(from_object, ['role'])) + + return to_object + + def _ControlReferenceConfig_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -290,7 +348,9 @@ def _CountTokensConfig_to_vertex( setv( parent_object, ['systemInstruction'], - t.t_content(getv(from_object, ['system_instruction'])), + _Content_to_vertex( + t.t_content(getv(from_object, ['system_instruction'])), to_object + ), ) if getv(from_object, ['tools']) is not None: @@ -361,7 +421,10 @@ def _CountTokensParameters_to_vertex( setv( to_object, ['contents'], - [item for item in t.t_contents(getv(from_object, ['contents']))], + [ + _Content_to_vertex(item, to_object) + for item in t.t_contents(getv(from_object, ['contents'])) + ], ) if getv(from_object, ['config']) is not None: @@ -1092,7 +1155,9 @@ def _GenerateContentConfig_to_mldev( setv( to_object, ['speechConfig'], - t.t_speech_config(getv(from_object, ['speech_config'])), + _SpeechConfig_to_mldev( + t.t_speech_config(getv(from_object, ['speech_config'])), to_object + ), ) if getv(from_object, ['audio_timestamp']) is not None: @@ -1124,7 +1189,9 @@ def _GenerateContentConfig_to_vertex( setv( parent_object, ['systemInstruction'], - t.t_content(getv(from_object, ['system_instruction'])), + _Content_to_vertex( + t.t_content(getv(from_object, ['system_instruction'])), to_object + ), ) if getv(from_object, ['temperature']) is not None: @@ -1251,9 +1318,7 @@ def _GenerateContentConfig_to_vertex( setv( to_object, ['speechConfig'], - _SpeechConfig_to_vertex( - t.t_speech_config(getv(from_object, ['speech_config'])), to_object - ), + t.t_speech_config(getv(from_object, ['speech_config'])), ) if 
getv(from_object, ['audio_timestamp']) is not None: @@ -1324,7 +1389,10 @@ def _GenerateContentParameters_to_vertex( setv( to_object, ['contents'], - [item for item in t.t_contents(getv(from_object, ['contents']))], + [ + _Content_to_vertex(item, to_object) + for item in t.t_contents(getv(from_object, ['contents'])) + ], ) if getv(from_object, ['config']) is not None: @@ -2434,13 +2502,7 @@ def _GenerationConfig_to_vertex( setv(to_object, ['seed'], getv(from_object, ['seed'])) if getv(from_object, ['speech_config']) is not None: - setv( - to_object, - ['speechConfig'], - _SpeechConfig_to_vertex( - getv(from_object, ['speech_config']), to_object - ), - ) + setv(to_object, ['speechConfig'], getv(from_object, ['speech_config'])) if getv(from_object, ['stop_sequences']) is not None: setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences'])) @@ -2503,7 +2565,11 @@ def _GoogleMaps_to_mldev( ) -> dict[str, Any]: to_object: dict[str, Any] = {} if getv(from_object, ['auth_config']) is not None: - raise ValueError('auth_config parameter is not supported in Gemini API.') + setv( + to_object, + ['authConfig'], + _AuthConfig_to_mldev(getv(from_object, ['auth_config']), to_object), + ) if getv(from_object, ['enable_widget']) is not None: setv(to_object, ['enableWidget'], getv(from_object, ['enable_widget'])) @@ -2516,14 +2582,14 @@ def _GoogleSearch_to_mldev( parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['exclude_domains']) is not None: + if getv(from_object, ['blocking_confidence']) is not None: raise ValueError( - 'exclude_domains parameter is not supported in Gemini API.' + 'blocking_confidence parameter is not supported in Gemini API.' ) - if getv(from_object, ['blocking_confidence']) is not None: + if getv(from_object, ['exclude_domains']) is not None: raise ValueError( - 'blocking_confidence parameter is not supported in Gemini API.' + 'exclude_domains parameter is not supported in Gemini API.' ) if getv(from_object, ['time_range_filter']) is not None: @@ -2555,6 +2621,16 @@ def _ImageConfig_to_mldev( 'output_compression_quality parameter is not supported in Gemini API.' ) + if getv(from_object, ['image_output_options']) is not None: + raise ValueError( + 'image_output_options parameter is not supported in Gemini API.' + ) + + if getv(from_object, ['person_generation']) is not None: + raise ValueError( + 'person_generation parameter is not supported in Gemini API.' 
+ ) + return to_object @@ -2583,6 +2659,20 @@ def _ImageConfig_to_vertex( getv(from_object, ['output_compression_quality']), ) + if getv(from_object, ['image_output_options']) is not None: + setv( + to_object, + ['imageOutputOptions'], + getv(from_object, ['image_output_options']), + ) + + if getv(from_object, ['person_generation']) is not None: + setv( + to_object, + ['personGeneration'], + getv(from_object, ['person_generation']), + ) + return to_object @@ -2942,6 +3032,24 @@ def _Model_from_vertex( return to_object +def _MultiSpeakerVoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['speaker_voice_configs']) is not None: + setv( + to_object, + ['speakerVoiceConfigs'], + [ + _SpeakerVoiceConfig_to_mldev(item, to_object) + for item in getv(from_object, ['speaker_voice_configs']) + ], + ) + + return to_object + + def _Part_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -3006,6 +3114,67 @@ def _Part_to_mldev( if getv(from_object, ['video_metadata']) is not None: setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + if getv(from_object, ['part_metadata']) is not None: + setv(to_object, ['partMetadata'], getv(from_object, ['part_metadata'])) + + return to_object + + +def _Part_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['media_resolution']) is not None: + setv( + to_object, ['mediaResolution'], getv(from_object, ['media_resolution']) + ) + + if getv(from_object, ['code_execution_result']) is not None: + setv( + to_object, + ['codeExecutionResult'], + getv(from_object, ['code_execution_result']), + ) + + if getv(from_object, ['executable_code']) is not None: + setv(to_object, ['executableCode'], getv(from_object, ['executable_code'])) + + if getv(from_object, ['file_data']) is not None: + setv(to_object, ['fileData'], getv(from_object, ['file_data'])) + + if getv(from_object, ['function_call']) is not None: + setv(to_object, ['functionCall'], getv(from_object, ['function_call'])) + + if getv(from_object, ['function_response']) is not None: + setv( + to_object, + ['functionResponse'], + getv(from_object, ['function_response']), + ) + + if getv(from_object, ['inline_data']) is not None: + setv(to_object, ['inlineData'], getv(from_object, ['inline_data'])) + + if getv(from_object, ['text']) is not None: + setv(to_object, ['text'], getv(from_object, ['text'])) + + if getv(from_object, ['thought']) is not None: + setv(to_object, ['thought'], getv(from_object, ['thought'])) + + if getv(from_object, ['thought_signature']) is not None: + setv( + to_object, + ['thoughtSignature'], + getv(from_object, ['thought_signature']), + ) + + if getv(from_object, ['video_metadata']) is not None: + setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata'])) + + if getv(from_object, ['part_metadata']) is not None: + raise ValueError('part_metadata parameter is not supported in Vertex AI.') + return to_object @@ -3415,20 +3584,46 @@ def _SegmentImageSource_to_vertex( return to_object -def _SpeechConfig_to_vertex( +def _SpeakerVoiceConfig_to_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, ) -> dict[str, Any]: to_object: dict[str, Any] = {} - if getv(from_object, ['language_code']) is not 
None: - setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) + if getv(from_object, ['speaker']) is not None: + setv(to_object, ['speaker'], getv(from_object, ['speaker'])) if getv(from_object, ['voice_config']) is not None: - setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config'])) + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), + ) + + return to_object + + +def _SpeechConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['language_code']) is not None: + setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) if getv(from_object, ['multi_speaker_voice_config']) is not None: - raise ValueError( - 'multi_speaker_voice_config parameter is not supported in Vertex AI.' + setv( + to_object, + ['multiSpeakerVoiceConfig'], + _MultiSpeakerVoiceConfig_to_mldev( + getv(from_object, ['multi_speaker_voice_config']), to_object + ), + ) + + if getv(from_object, ['voice_config']) is not None: + setv( + to_object, + ['voiceConfig'], + _VoiceConfig_to_mldev(getv(from_object, ['voice_config']), to_object), ) return to_object @@ -3484,6 +3679,13 @@ def _Tool_to_mldev( if getv(from_object, ['file_search']) is not None: setv(to_object, ['fileSearch'], getv(from_object, ['file_search'])) + if getv(from_object, ['google_maps']) is not None: + setv( + to_object, + ['googleMaps'], + _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), + ) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -3492,13 +3694,6 @@ def _Tool_to_mldev( 'enterprise_web_search parameter is not supported in Gemini API.' ) - if getv(from_object, ['google_maps']) is not None: - setv( - to_object, - ['googleMaps'], - _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object), - ) - if getv(from_object, ['google_search']) is not None: setv( to_object, @@ -3543,6 +3738,9 @@ def _Tool_to_vertex( if getv(from_object, ['file_search']) is not None: raise ValueError('file_search parameter is not supported in Vertex AI.') + if getv(from_object, ['google_maps']) is not None: + setv(to_object, ['googleMaps'], getv(from_object, ['google_maps'])) + if getv(from_object, ['code_execution']) is not None: setv(to_object, ['codeExecution'], getv(from_object, ['code_execution'])) @@ -3553,9 +3751,6 @@ def _Tool_to_vertex( getv(from_object, ['enterprise_web_search']), ) - if getv(from_object, ['google_maps']) is not None: - setv(to_object, ['googleMaps'], getv(from_object, ['google_maps'])) - if getv(from_object, ['google_search']) is not None: setv(to_object, ['googleSearch'], getv(from_object, ['google_search'])) @@ -3946,6 +4141,26 @@ def _Video_to_vertex( return to_object +def _VoiceConfig_to_mldev( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ['prebuilt_voice_config']) is not None: + setv( + to_object, + ['prebuiltVoiceConfig'], + getv(from_object, ['prebuilt_voice_config']), + ) + + if getv(from_object, ['replicated_voice_config']) is not None: + raise ValueError( + 'replicated_voice_config parameter is not supported in Gemini API.' 
+ ) + + return to_object + + class Models(_api_module.BaseModule): def _generate_content( diff --git a/google/genai/tests/models/test_generate_content.py b/google/genai/tests/models/test_generate_content.py index e32163efb..c6d5628c8 100644 --- a/google/genai/tests/models/test_generate_content.py +++ b/google/genai/tests/models/test_generate_content.py @@ -375,7 +375,6 @@ class InstrumentEnum(Enum): ), pytest_helper.TestTableItem( name='test_speech_with_multi_speaker_voice_config', - exception_if_vertex='not supported', parameters=types._GenerateContentParameters( model='gemini-2.5-flash-preview-tts', contents=t.t_contents( @@ -410,7 +409,7 @@ class InstrumentEnum(Enum): ), pytest_helper.TestTableItem( name='test_speech_error_with_speech_config_and_multi_speech_config', - exception_if_vertex='not supported', + exception_if_vertex='mutually exclusive', exception_if_mldev='mutually exclusive', parameters=types._GenerateContentParameters( model='gemini-2.5-flash-preview-tts', diff --git a/google/genai/tunings.py b/google/genai/tunings.py index 7c8d5c086..5984caa2f 100644 --- a/google/genai/tunings.py +++ b/google/genai/tunings.py @@ -674,13 +674,7 @@ def _GenerationConfig_to_vertex( setv(to_object, ['seed'], getv(from_object, ['seed'])) if getv(from_object, ['speech_config']) is not None: - setv( - to_object, - ['speechConfig'], - _SpeechConfig_to_vertex( - getv(from_object, ['speech_config']), to_object, root_object - ), - ) + setv(to_object, ['speechConfig'], getv(from_object, ['speech_config'])) if getv(from_object, ['stop_sequences']) is not None: setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences'])) @@ -861,26 +855,6 @@ def _ListTuningJobsResponse_from_vertex( return to_object -def _SpeechConfig_to_vertex( - from_object: Union[dict[str, Any], object], - parent_object: Optional[dict[str, Any]] = None, - root_object: Optional[Union[dict[str, Any], object]] = None, -) -> dict[str, Any]: - to_object: dict[str, Any] = {} - if getv(from_object, ['language_code']) is not None: - setv(to_object, ['languageCode'], getv(from_object, ['language_code'])) - - if getv(from_object, ['voice_config']) is not None: - setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config'])) - - if getv(from_object, ['multi_speaker_voice_config']) is not None: - raise ValueError( - 'multi_speaker_voice_config parameter is not supported in Vertex AI.' 
- ) - - return to_object - - def _TunedModel_from_mldev( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -1121,9 +1095,23 @@ def _TuningJob_from_vertex( to_object, ['custom_base_model'], getv(from_object, ['customBaseModel']) ) + if getv(from_object, ['evaluateDatasetRuns']) is not None: + setv( + to_object, + ['evaluate_dataset_runs'], + [item for item in getv(from_object, ['evaluateDatasetRuns'])], + ) + if getv(from_object, ['experiment']) is not None: setv(to_object, ['experiment'], getv(from_object, ['experiment'])) + if getv(from_object, ['fullFineTuningSpec']) is not None: + setv( + to_object, + ['full_fine_tuning_spec'], + getv(from_object, ['fullFineTuningSpec']), + ) + if getv(from_object, ['labels']) is not None: setv(to_object, ['labels'], getv(from_object, ['labels'])) @@ -1143,6 +1131,9 @@ def _TuningJob_from_vertex( getv(from_object, ['tunedModelDisplayName']), ) + if getv(from_object, ['tuningJobState']) is not None: + setv(to_object, ['tuning_job_state'], getv(from_object, ['tuningJobState'])) + if getv(from_object, ['veoTuningSpec']) is not None: setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec'])) diff --git a/google/genai/types.py b/google/genai/types.py index 24a104f38..c2dedf0ed 100644 --- a/google/genai/types.py +++ b/google/genai/types.py @@ -196,18 +196,13 @@ class Mode(_common.CaseInSensitiveEnum): """Run retrieval only when system decides it is necessary.""" -class ApiSpec(_common.CaseInSensitiveEnum): - """The API spec that the external API implements. - - This enum is not supported in Gemini API. - """ +class Environment(_common.CaseInSensitiveEnum): + """The environment being operated.""" - API_SPEC_UNSPECIFIED = 'API_SPEC_UNSPECIFIED' - """Unspecified API spec. This value should not be used.""" - SIMPLE_SEARCH = 'SIMPLE_SEARCH' - """Simple search API spec.""" - ELASTIC_SEARCH = 'ELASTIC_SEARCH' - """Elastic search API spec.""" + ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED' + """Defaults to browser.""" + ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER' + """Operates in a web browser.""" class AuthType(_common.CaseInSensitiveEnum): @@ -244,6 +239,20 @@ class HttpElementLocation(_common.CaseInSensitiveEnum): """Element is in the HTTP request cookie.""" +class ApiSpec(_common.CaseInSensitiveEnum): + """The API spec that the external API implements. + + This enum is not supported in Gemini API. + """ + + API_SPEC_UNSPECIFIED = 'API_SPEC_UNSPECIFIED' + """Unspecified API spec. This value should not be used.""" + SIMPLE_SEARCH = 'SIMPLE_SEARCH' + """Simple search API spec.""" + ELASTIC_SEARCH = 'ELASTIC_SEARCH' + """Elastic search API spec.""" + + class PhishBlockThreshold(_common.CaseInSensitiveEnum): """Sites with confidence level chosen & above this value will be blocked from the search results. 
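# --- Editor's illustrative sketch (not part of the generated diff) ---
# The speech-config changes above (new _SpeechConfig_to_mldev /
# _MultiSpeakerVoiceConfig_to_mldev / _SpeakerVoiceConfig_to_mldev /
# _VoiceConfig_to_mldev converters, removal of the raising
# _SpeechConfig_to_vertex, and the test table no longer expecting a Vertex
# exception) mean a multi-speaker TTS request is now converted for both
# backends. A minimal usage sketch: the model name is taken from the test
# table in this diff, the speaker labels and voice names are placeholders,
# and it assumes an API key is configured in the environment.
from google import genai
from google.genai import types

client = genai.Client()

speech_config = types.SpeechConfig(
    multi_speaker_voice_config=types.MultiSpeakerVoiceConfig(
        speaker_voice_configs=[
            types.SpeakerVoiceConfig(
                speaker='Alice',
                voice_config=types.VoiceConfig(
                    prebuilt_voice_config=types.PrebuiltVoiceConfig(
                        voice_name='Kore'
                    )
                ),
            ),
            types.SpeakerVoiceConfig(
                speaker='Bob',
                voice_config=types.VoiceConfig(
                    prebuilt_voice_config=types.PrebuiltVoiceConfig(
                        voice_name='Puck'
                    )
                ),
            ),
        ]
    )
)

response = client.models.generate_content(
    model='gemini-2.5-flash-preview-tts',
    contents='Alice: Hi Bob, how was your day? Bob: Pretty good, thanks!',
    config=types.GenerateContentConfig(
        response_modalities=['AUDIO'],
        speech_config=speech_config,
    ),
)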
@@ -267,52 +276,63 @@ class PhishBlockThreshold(_common.CaseInSensitiveEnum): class ThinkingLevel(_common.CaseInSensitiveEnum): - """The level of thoughts tokens that the model should generate.""" + """The number of thoughts tokens that the model should generate.""" THINKING_LEVEL_UNSPECIFIED = 'THINKING_LEVEL_UNSPECIFIED' - """Default value.""" + """Unspecified thinking level.""" LOW = 'LOW' """Low thinking level.""" HIGH = 'HIGH' """High thinking level.""" +class PersonGeneration(_common.CaseInSensitiveEnum): + """Enum that controls the generation of people.""" + + DONT_ALLOW = 'DONT_ALLOW' + """Block generation of images of people.""" + ALLOW_ADULT = 'ALLOW_ADULT' + """Generate images of adults, but not children.""" + ALLOW_ALL = 'ALLOW_ALL' + """Generate images that include adults and children.""" + + class HarmCategory(_common.CaseInSensitiveEnum): - """Harm category.""" + """The harm category to be blocked.""" HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED' - """The harm category is unspecified.""" + """Default value. This value is unused.""" HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT' - """The harm category is harassment.""" + """Abusive, threatening, or content intended to bully, torment, or ridicule.""" HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH' - """The harm category is hate speech.""" + """Content that promotes violence or incites hatred against individuals or groups based on certain attributes.""" HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT' - """The harm category is sexually explicit content.""" + """Content that contains sexually explicit material.""" HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT' - """The harm category is dangerous content.""" + """Content that promotes, facilitates, or enables dangerous activities.""" HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY' """Deprecated: Election filter is not longer supported. The harm category is civic integrity.""" HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE' - """The harm category is image hate. This enum value is not supported in Gemini API.""" + """Images that contain hate speech. This enum value is not supported in Gemini API.""" HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT = ( 'HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT' ) - """The harm category is image dangerous content. This enum value is not supported in Gemini API.""" + """Images that contain dangerous content. This enum value is not supported in Gemini API.""" HARM_CATEGORY_IMAGE_HARASSMENT = 'HARM_CATEGORY_IMAGE_HARASSMENT' - """The harm category is image harassment. This enum value is not supported in Gemini API.""" + """Images that contain harassment. This enum value is not supported in Gemini API.""" HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT = ( 'HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT' ) - """The harm category is image sexually explicit content. This enum value is not supported in Gemini API.""" + """Images that contain sexually explicit content. This enum value is not supported in Gemini API.""" HARM_CATEGORY_JAILBREAK = 'HARM_CATEGORY_JAILBREAK' - """The harm category is for jailbreak prompts. This enum value is not supported in Gemini API.""" + """Prompts designed to bypass safety filters. This enum value is not supported in Gemini API.""" class HarmBlockMethod(_common.CaseInSensitiveEnum): - """Specify if the threshold is used for probability or severity score. + """The method for blocking content. - If not specified, the threshold is used for probability score. 
This enum is - not supported in Gemini API. + If not specified, the default behavior is to use the probability score. This + enum is not supported in Gemini API. """ HARM_BLOCK_METHOD_UNSPECIFIED = 'HARM_BLOCK_METHOD_UNSPECIFIED' @@ -324,20 +344,23 @@ class HarmBlockMethod(_common.CaseInSensitiveEnum): class HarmBlockThreshold(_common.CaseInSensitiveEnum): - """The harm block threshold.""" + """The threshold for blocking content. + + If the harm probability exceeds this threshold, the content will be blocked. + """ HARM_BLOCK_THRESHOLD_UNSPECIFIED = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' - """Unspecified harm block threshold.""" + """The harm block threshold is unspecified.""" BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE' - """Block low threshold and above (i.e. block more).""" + """Block content with a low harm probability or higher.""" BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE' - """Block medium threshold and above.""" + """Block content with a medium harm probability or higher.""" BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH' - """Block only high threshold (i.e. block less).""" + """Block content with a high harm probability.""" BLOCK_NONE = 'BLOCK_NONE' - """Block none.""" + """Do not block any content, regardless of its harm probability.""" OFF = 'OFF' - """Turn off the safety filter.""" + """Turn off the safety filter entirely.""" class FinishReason(_common.CaseInSensitiveEnum): @@ -379,47 +402,48 @@ class FinishReason(_common.CaseInSensitiveEnum): class HarmProbability(_common.CaseInSensitiveEnum): - """Output only. Harm probability levels in the content.""" + """Output only. The probability of harm for this category.""" HARM_PROBABILITY_UNSPECIFIED = 'HARM_PROBABILITY_UNSPECIFIED' - """Harm probability unspecified.""" + """The harm probability is unspecified.""" NEGLIGIBLE = 'NEGLIGIBLE' - """Negligible level of harm.""" + """The harm probability is negligible.""" LOW = 'LOW' - """Low level of harm.""" + """The harm probability is low.""" MEDIUM = 'MEDIUM' - """Medium level of harm.""" + """The harm probability is medium.""" HIGH = 'HIGH' - """High level of harm.""" + """The harm probability is high.""" class HarmSeverity(_common.CaseInSensitiveEnum): """Output only. - Harm severity levels in the content. This enum is not supported in Gemini API. + The severity of harm for this category. This enum is not supported in Gemini + API. """ HARM_SEVERITY_UNSPECIFIED = 'HARM_SEVERITY_UNSPECIFIED' - """Harm severity unspecified.""" + """The harm severity is unspecified.""" HARM_SEVERITY_NEGLIGIBLE = 'HARM_SEVERITY_NEGLIGIBLE' - """Negligible level of harm severity.""" + """The harm severity is negligible.""" HARM_SEVERITY_LOW = 'HARM_SEVERITY_LOW' - """Low level of harm severity.""" + """The harm severity is low.""" HARM_SEVERITY_MEDIUM = 'HARM_SEVERITY_MEDIUM' - """Medium level of harm severity.""" + """The harm severity is medium.""" HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH' - """High level of harm severity.""" + """The harm severity is high.""" class UrlRetrievalStatus(_common.CaseInSensitiveEnum): - """Status of the url retrieval.""" + """The status of the URL retrieval.""" URL_RETRIEVAL_STATUS_UNSPECIFIED = 'URL_RETRIEVAL_STATUS_UNSPECIFIED' """Default value. 
This value is unused.""" URL_RETRIEVAL_STATUS_SUCCESS = 'URL_RETRIEVAL_STATUS_SUCCESS' - """Url retrieval is successful.""" + """The URL was retrieved successfully.""" URL_RETRIEVAL_STATUS_ERROR = 'URL_RETRIEVAL_STATUS_ERROR' - """Url retrieval is failed due to error.""" + """The URL retrieval failed.""" URL_RETRIEVAL_STATUS_PAYWALL = 'URL_RETRIEVAL_STATUS_PAYWALL' """Url retrieval is failed because the content is behind paywall. This enum value is not supported in Vertex AI.""" URL_RETRIEVAL_STATUS_UNSAFE = 'URL_RETRIEVAL_STATUS_UNSAFE' @@ -546,6 +570,72 @@ class JobState(_common.CaseInSensitiveEnum): """The job is partially succeeded, some results may be missing due to errors.""" +class TuningJobState(_common.CaseInSensitiveEnum): + """Output only. + + The detail state of the tuning job (while the overall `JobState` is running). + This enum is not supported in Gemini API. + """ + + TUNING_JOB_STATE_UNSPECIFIED = 'TUNING_JOB_STATE_UNSPECIFIED' + """Default tuning job state.""" + TUNING_JOB_STATE_WAITING_FOR_QUOTA = 'TUNING_JOB_STATE_WAITING_FOR_QUOTA' + """Tuning job is waiting for job quota.""" + TUNING_JOB_STATE_PROCESSING_DATASET = 'TUNING_JOB_STATE_PROCESSING_DATASET' + """Tuning job is validating the dataset.""" + TUNING_JOB_STATE_WAITING_FOR_CAPACITY = ( + 'TUNING_JOB_STATE_WAITING_FOR_CAPACITY' + ) + """Tuning job is waiting for hardware capacity.""" + TUNING_JOB_STATE_TUNING = 'TUNING_JOB_STATE_TUNING' + """Tuning job is running.""" + TUNING_JOB_STATE_POST_PROCESSING = 'TUNING_JOB_STATE_POST_PROCESSING' + """Tuning job is doing some post processing steps.""" + + +class AggregationMetric(_common.CaseInSensitiveEnum): + """Aggregation metric. This enum is not supported in Gemini API.""" + + AGGREGATION_METRIC_UNSPECIFIED = 'AGGREGATION_METRIC_UNSPECIFIED' + """Unspecified aggregation metric.""" + AVERAGE = 'AVERAGE' + """Average aggregation metric. Not supported for Pairwise metric.""" + MODE = 'MODE' + """Mode aggregation metric.""" + STANDARD_DEVIATION = 'STANDARD_DEVIATION' + """Standard deviation aggregation metric. Not supported for pairwise metric.""" + VARIANCE = 'VARIANCE' + """Variance aggregation metric. Not supported for pairwise metric.""" + MINIMUM = 'MINIMUM' + """Minimum aggregation metric. Not supported for pairwise metric.""" + MAXIMUM = 'MAXIMUM' + """Maximum aggregation metric. Not supported for pairwise metric.""" + MEDIAN = 'MEDIAN' + """Median aggregation metric. Not supported for pairwise metric.""" + PERCENTILE_P90 = 'PERCENTILE_P90' + """90th percentile aggregation metric. Not supported for pairwise metric.""" + PERCENTILE_P95 = 'PERCENTILE_P95' + """95th percentile aggregation metric. Not supported for pairwise metric.""" + PERCENTILE_P99 = 'PERCENTILE_P99' + """99th percentile aggregation metric. Not supported for pairwise metric.""" + + +class PairwiseChoice(_common.CaseInSensitiveEnum): + """Output only. + + Pairwise metric choice. This enum is not supported in Gemini API. + """ + + PAIRWISE_CHOICE_UNSPECIFIED = 'PAIRWISE_CHOICE_UNSPECIFIED' + """Unspecified prediction choice.""" + BASELINE = 'BASELINE' + """Baseline prediction wins""" + CANDIDATE = 'CANDIDATE' + """Candidate prediction wins""" + TIE = 'TIE' + """Winner cannot be determined""" + + class TuningTask(_common.CaseInSensitiveEnum): """The tuning task. 
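The new `TuningJobState` enum pairs with the `tuning_job_state` field that `_TuningJob_from_vertex` now populates. A minimal usage sketch, assuming a Vertex AI client, a hypothetical project/location/job name, and that the `TuningJob` model exposes the new field (this enum is Vertex-only, per its docstring):

```python
from google import genai
from google.genai import types

# Hypothetical project, location, and tuning-job resource name.
client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'
)
job = client.tunings.get(
    name='projects/my-project/locations/us-central1/tuningJobs/123'
)

# JobState still reports the coarse lifecycle; TuningJobState adds the
# finer-grained phase while the job is running (assumes job.tuning_job_state
# is populated by the converter change earlier in this diff).
if job.state == types.JobState.JOB_STATE_RUNNING:
  if job.tuning_job_state == types.TuningJobState.TUNING_JOB_STATE_TUNING:
    print('Model is actively tuning.')
  else:
    print('Detail state:', job.tuning_job_state)
```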
@@ -562,6 +652,15 @@ class TuningTask(_common.CaseInSensitiveEnum): """Tuning task for reference to video.""" +class DocumentState(_common.CaseInSensitiveEnum): + """State for the lifecycle of a Document.""" + + STATE_UNSPECIFIED = 'STATE_UNSPECIFIED' + STATE_PENDING = 'STATE_PENDING' + STATE_ACTIVE = 'STATE_ACTIVE' + STATE_FAILED = 'STATE_FAILED' + + class PartMediaResolutionLevel(_common.CaseInSensitiveEnum): """The tokenization quality used for given media.""" @@ -633,15 +732,6 @@ class DynamicRetrievalConfigMode(_common.CaseInSensitiveEnum): """Run retrieval only when system decides it is necessary.""" -class Environment(_common.CaseInSensitiveEnum): - """The environment being operated.""" - - ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED' - """Defaults to browser.""" - ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER' - """Operates in a web browser.""" - - class FunctionCallingConfigMode(_common.CaseInSensitiveEnum): """Config for the function calling config mode.""" @@ -666,17 +756,6 @@ class SafetyFilterLevel(_common.CaseInSensitiveEnum): BLOCK_NONE = 'BLOCK_NONE' -class PersonGeneration(_common.CaseInSensitiveEnum): - """Enum that controls the generation of people.""" - - DONT_ALLOW = 'DONT_ALLOW' - """Block generation of images of people.""" - ALLOW_ADULT = 'ALLOW_ADULT' - """Generate images of adults, but not children.""" - ALLOW_ALL = 'ALLOW_ALL' - """Generate images that include adults and children.""" - - class ImagePromptLanguage(_common.CaseInSensitiveEnum): """Enum that specifies the language of the text in the prompt.""" @@ -801,15 +880,6 @@ class TuningMethod(_common.CaseInSensitiveEnum): """Preference optimization tuning.""" -class DocumentState(_common.CaseInSensitiveEnum): - """State for the lifecycle of a Document.""" - - STATE_UNSPECIFIED = 'STATE_UNSPECIFIED' - STATE_PENDING = 'STATE_PENDING' - STATE_ACTIVE = 'STATE_ACTIVE' - STATE_FAILED = 'STATE_FAILED' - - class FileState(_common.CaseInSensitiveEnum): """State for the lifecycle of a File.""" @@ -1064,14 +1134,20 @@ class ExecutableCodeDict(TypedDict, total=False): class FileData(_common.BaseModel): - """URI based data.""" + """URI-based data. + + A FileData message contains a URI pointing to data of a specific media type. + It is used to represent images, audio, and video stored in Google Cloud + Storage. + """ display_name: Optional[str] = Field( default=None, - description="""Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API.""", + description="""Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled. This field is not supported in Gemini API.""", ) file_uri: Optional[str] = Field( - default=None, description="""Required. URI.""" + default=None, + description="""Required. The URI of the file in Google Cloud Storage.""", ) mime_type: Optional[str] = Field( default=None, @@ -1080,13 +1156,18 @@ class FileData(_common.BaseModel): class FileDataDict(TypedDict, total=False): - """URI based data.""" + """URI-based data. 
+ + A FileData message contains a URI pointing to data of a specific media type. + It is used to represent images, audio, and video stored in Google Cloud + Storage. + """ display_name: Optional[str] - """Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API.""" + """Optional. The display name of the file. Used to provide a label or filename to distinguish files. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server side tools (`code_execution`, `google_search`, and `url_context`) are enabled. This field is not supported in Gemini API.""" file_uri: Optional[str] - """Required. URI.""" + """Required. The URI of the file in Google Cloud Storage.""" mime_type: Optional[str] """Required. The IANA standard MIME type of the source data.""" @@ -1101,6 +1182,13 @@ class PartialArg(_common.BaseModel): This data type is not supported in Gemini API. """ + bool_value: Optional[bool] = Field( + default=None, description="""Optional. Represents a boolean value.""" + ) + json_path: Optional[str] = Field( + default=None, + description="""Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""", + ) null_value: Optional[Literal['NULL_VALUE']] = Field( default=None, description="""Optional. Represents a null value.""" ) @@ -1110,13 +1198,6 @@ class PartialArg(_common.BaseModel): string_value: Optional[str] = Field( default=None, description="""Optional. Represents a string value.""" ) - bool_value: Optional[bool] = Field( - default=None, description="""Optional. Represents a boolean value.""" - ) - json_path: Optional[str] = Field( - default=None, - description="""Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""", - ) will_continue: Optional[bool] = Field( default=None, description="""Optional. Whether this is not the last part of the same json_path. If true, another PartialArg message for the current json_path is expected to follow.""", @@ -1129,6 +1210,12 @@ class PartialArgDict(TypedDict, total=False): This data type is not supported in Gemini API. """ + bool_value: Optional[bool] + """Optional. Represents a boolean value.""" + + json_path: Optional[str] + """Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""" + null_value: Optional[Literal['NULL_VALUE']] """Optional. Represents a null value.""" @@ -1138,12 +1225,6 @@ class PartialArgDict(TypedDict, total=False): string_value: Optional[str] """Optional. Represents a string value.""" - bool_value: Optional[bool] - """Optional. Represents a boolean value.""" - - json_path: Optional[str] - """Required. A JSON Path (RFC 9535) to the argument being streamed. https://datatracker.ietf.org/doc/html/rfc9535. e.g. "$.foo.bar[0].data".""" - will_continue: Optional[bool] """Optional. Whether this is not the last part of the same json_path. 
If true, another PartialArg message for the current json_path is expected to follow.""" @@ -1430,14 +1511,18 @@ class FunctionResponseDict(TypedDict, total=False): class Blob(_common.BaseModel): - """Content blob.""" + """A content blob. + + A Blob contains data of a specific media type. It is used to represent images, + audio, and video. + """ data: Optional[bytes] = Field( - default=None, description="""Required. Raw bytes.""" + default=None, description="""Required. The raw bytes of the data.""" ) display_name: Optional[str] = Field( default=None, - description="""Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API.""", + description="""Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled. This field is not supported in Gemini API.""", ) mime_type: Optional[str] = Field( default=None, @@ -1459,13 +1544,17 @@ def as_image(self) -> Optional['Image']: class BlobDict(TypedDict, total=False): - """Content blob.""" + """A content blob. + + A Blob contains data of a specific media type. It is used to represent images, + audio, and video. + """ data: Optional[bytes] - """Required. Raw bytes.""" + """Required. The raw bytes of the data.""" display_name: Optional[str] - """Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in PromptMessage for prompt management. It is currently used in the Gemini GenerateContent calls only when server side tools (code_execution, google_search, and url_context) are enabled. This field is not supported in Gemini API.""" + """Optional. The display name of the blob. Used to provide a label or filename to distinguish blobs. This field is only returned in `PromptMessage` for prompt management. It is used in the Gemini calls only when server-side tools (`code_execution`, `google_search`, and `url_context`) are enabled. This field is not supported in Gemini API.""" mime_type: Optional[str] """Required. The IANA standard MIME type of the source data.""" @@ -1475,14 +1564,14 @@ class BlobDict(TypedDict, total=False): class VideoMetadata(_common.BaseModel): - """Metadata describes the input video content.""" + """Provides metadata for a video, including the start and end offsets for clipping and the frame rate.""" end_offset: Optional[str] = Field( default=None, description="""Optional. The end offset of the video.""" ) fps: Optional[float] = Field( default=None, - description="""Optional. The frame rate of the video sent to the model. If not specified, the default value will be 1.0. The fps range is (0.0, 24.0].""", + description="""Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0].""", ) start_offset: Optional[str] = Field( default=None, description="""Optional. 
The start offset of the video.""" @@ -1490,13 +1579,13 @@ class VideoMetadata(_common.BaseModel): class VideoMetadataDict(TypedDict, total=False): - """Metadata describes the input video content.""" + """Provides metadata for a video, including the start and end offsets for clipping and the frame rate.""" end_offset: Optional[str] """Optional. The end offset of the video.""" fps: Optional[float] - """Optional. The frame rate of the video sent to the model. If not specified, the default value will be 1.0. The fps range is (0.0, 24.0].""" + """Optional. The frame rate of the video sent to the model. If not specified, the default value is 1.0. The valid range is (0.0, 24.0].""" start_offset: Optional[str] """Optional. The start offset of the video.""" @@ -1520,32 +1609,34 @@ class Part(_common.BaseModel): ) code_execution_result: Optional[CodeExecutionResult] = Field( default=None, - description="""Optional. Result of executing the [ExecutableCode].""", + description="""Optional. The result of executing the ExecutableCode.""", ) executable_code: Optional[ExecutableCode] = Field( default=None, - description="""Optional. Code generated by the model that is meant to be executed.""", + description="""Optional. Code generated by the model that is intended to be executed.""", ) file_data: Optional[FileData] = Field( - default=None, description="""Optional. URI based data.""" + default=None, + description="""Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage.""", ) function_call: Optional[FunctionCall] = Field( default=None, - description="""Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""", + description="""Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function.""", ) function_response: Optional[FunctionResponse] = Field( default=None, - description="""Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""", + description="""Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted.""", ) inline_data: Optional[Blob] = Field( - default=None, description="""Optional. Inlined bytes data.""" + default=None, + description="""Optional. The inline data content of the part. This can be used to include images, audio, or video in a request.""", ) text: Optional[str] = Field( - default=None, description="""Optional. Text part (can be code).""" + default=None, description="""Optional. The text content of the part.""" ) thought: Optional[bool] = Field( default=None, - description="""Optional. Indicates if the part is thought from the model.""", + description="""Optional. Indicates whether the `part` represents the model's thought process or reasoning.""", ) thought_signature: Optional[bytes] = Field( default=None, @@ -1555,6 +1646,10 @@ class Part(_common.BaseModel): default=None, description="""Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.""", ) + part_metadata: Optional[dict[str, Any]] = Field( + default=None, + description="""Custom metadata associated with the Part. 
Agents using genai.Part as content representation may need to keep track of the additional information. For example, it can be the name of a file/source from which the Part originates or a way to multiplex multiple Part streams. This field is not supported in Vertex AI.""", + ) def __init__( self, @@ -1748,28 +1843,28 @@ class PartDict(TypedDict, total=False): """ code_execution_result: Optional[CodeExecutionResultDict] - """Optional. Result of executing the [ExecutableCode].""" + """Optional. The result of executing the ExecutableCode.""" executable_code: Optional[ExecutableCodeDict] - """Optional. Code generated by the model that is meant to be executed.""" + """Optional. Code generated by the model that is intended to be executed.""" file_data: Optional[FileDataDict] - """Optional. URI based data.""" + """Optional. The URI-based data of the part. This can be used to include files from Google Cloud Storage.""" function_call: Optional[FunctionCallDict] - """Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""" + """Optional. A predicted function call returned from the model. This contains the name of the function to call and the arguments to pass to the function.""" function_response: Optional[FunctionResponseDict] - """Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""" + """Optional. The result of a function call. This is used to provide the model with the result of a function call that it predicted.""" inline_data: Optional[BlobDict] - """Optional. Inlined bytes data.""" + """Optional. The inline data content of the part. This can be used to include images, audio, or video in a request.""" text: Optional[str] - """Optional. Text part (can be code).""" + """Optional. The text content of the part.""" thought: Optional[bool] - """Optional. Indicates if the part is thought from the model.""" + """Optional. Indicates whether the `part` represents the model's thought process or reasoning.""" thought_signature: Optional[bytes] """Optional. An opaque signature for the thought so it can be reused in subsequent requests.""" @@ -1777,6 +1872,9 @@ class PartDict(TypedDict, total=False): video_metadata: Optional[VideoMetadataDict] """Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data.""" + part_metadata: Optional[dict[str, Any]] + """Custom metadata associated with the Part. Agents using genai.Part as content representation may need to keep track of the additional information. For example, it can be the name of a file/source from which the Part originates or a way to multiplex multiple Part streams. This field is not supported in Vertex AI.""" + PartOrDict = Union[Part, PartDict] @@ -1791,7 +1889,7 @@ class Content(_common.BaseModel): ) role: Optional[str] = Field( default=None, - description="""Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset.""", + description="""Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'.""", ) @@ -1803,7 +1901,7 @@ class ContentDict(TypedDict, total=False): a different IANA MIME type.""" role: Optional[str] - """Optional.
The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset.""" + """Optional. The producer of the content. Must be either 'user' or 'model'. If not set, the service will default to 'user'.""" ContentOrDict = Union[Content, ContentDict] @@ -2805,7 +2903,7 @@ class FunctionDeclaration(_common.BaseModel): ) name: Optional[str] = Field( default=None, - description="""Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64.""", + description="""Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots, colons and dashes, with a maximum length of 64.""", ) parameters: Optional[Schema] = Field( default=None, @@ -3031,7 +3129,7 @@ class FunctionDeclarationDict(TypedDict, total=False): """Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function.""" name: Optional[str] - """Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64.""" + """Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots, colons and dashes, with a maximum length of 64.""" parameters: Optional[SchemaDict] """Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1""" @@ -3166,58 +3264,6 @@ class FileSearchDict(TypedDict, total=False): FileSearchOrDict = Union[FileSearch, FileSearchDict] -class ApiAuthApiKeyConfig(_common.BaseModel): - """The API secret. This data type is not supported in Gemini API.""" - - api_key_secret_version: Optional[str] = Field( - default=None, - description="""Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}""", - ) - api_key_string: Optional[str] = Field( - default=None, - description="""The API key string. Either this or `api_key_secret_version` must be set.""", - ) - - -class ApiAuthApiKeyConfigDict(TypedDict, total=False): - """The API secret. This data type is not supported in Gemini API.""" - - api_key_secret_version: Optional[str] - """Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}""" - - api_key_string: Optional[str] - """The API key string. Either this or `api_key_secret_version` must be set.""" - - -ApiAuthApiKeyConfigOrDict = Union[ApiAuthApiKeyConfig, ApiAuthApiKeyConfigDict] - - -class ApiAuth(_common.BaseModel): - """The generic reusable api auth config. - - Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) - instead. This data type is not supported in Gemini API. 
- """ - - api_key_config: Optional[ApiAuthApiKeyConfig] = Field( - default=None, description="""The API secret.""" - ) - - -class ApiAuthDict(TypedDict, total=False): - """The generic reusable api auth config. - - Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) - instead. This data type is not supported in Gemini API. - """ - - api_key_config: Optional[ApiAuthApiKeyConfigDict] - """The API secret.""" - - -ApiAuthOrDict = Union[ApiAuth, ApiAuthDict] - - class ApiKeyConfig(_common.BaseModel): """Config for authentication with API key. @@ -3381,11 +3427,12 @@ class AuthConfigOidcConfigDict(TypedDict, total=False): class AuthConfig(_common.BaseModel): - """Auth configuration to run the extension. - - This data type is not supported in Gemini API. - """ + """The authentication config to access the API.""" + api_key: Optional[str] = Field( + default=None, + description="""The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""", + ) api_key_config: Optional[ApiKeyConfig] = Field( default=None, description="""Config for API key auth.""" ) @@ -3409,10 +3456,10 @@ class AuthConfig(_common.BaseModel): class AuthConfigDict(TypedDict, total=False): - """Auth configuration to run the extension. + """The authentication config to access the API.""" - This data type is not supported in Gemini API. - """ + api_key: Optional[str] + """The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""" api_key_config: Optional[ApiKeyConfigDict] """Config for API key auth.""" @@ -3438,52 +3485,130 @@ class AuthConfigDict(TypedDict, total=False): AuthConfigOrDict = Union[AuthConfig, AuthConfigDict] -class ExternalApiElasticSearchParams(_common.BaseModel): - """The search parameters to use for the ELASTIC_SEARCH spec. - - This data type is not supported in Gemini API. - """ +class GoogleMaps(_common.BaseModel): + """Tool to retrieve knowledge from Google Maps.""" - index: Optional[str] = Field( - default=None, description="""The ElasticSearch index to use.""" - ) - num_hits: Optional[int] = Field( + auth_config: Optional[AuthConfig] = Field( default=None, - description="""Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.""", + description="""The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""", ) - search_template: Optional[str] = Field( - default=None, description="""The ElasticSearch search template to use.""" + enable_widget: Optional[bool] = Field( + default=None, + description="""Optional. Whether to return a widget context token in the GroundingMetadata of the response. Developers can use the widget context token to render a Google Maps widget with geospatial context related to the places that the model references in the response.""", ) -class ExternalApiElasticSearchParamsDict(TypedDict, total=False): - """The search parameters to use for the ELASTIC_SEARCH spec. +class GoogleMapsDict(TypedDict, total=False): + """Tool to retrieve knowledge from Google Maps.""" - This data type is not supported in Gemini API. - """ + auth_config: Optional[AuthConfigDict] + """The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""" - index: Optional[str] - """The ElasticSearch index to use.""" + enable_widget: Optional[bool] + """Optional. 
Whether to return a widget context token in the GroundingMetadata of the response. Developers can use the widget context token to render a Google Maps widget with geospatial context related to the places that the model references in the response.""" - num_hits: Optional[int] - """Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.""" - search_template: Optional[str] - """The ElasticSearch search template to use.""" +GoogleMapsOrDict = Union[GoogleMaps, GoogleMapsDict] -ExternalApiElasticSearchParamsOrDict = Union[ - ExternalApiElasticSearchParams, ExternalApiElasticSearchParamsDict -] +class ApiAuthApiKeyConfig(_common.BaseModel): + """The API secret. This data type is not supported in Gemini API.""" + api_key_secret_version: Optional[str] = Field( + default=None, + description="""Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}""", + ) + api_key_string: Optional[str] = Field( + default=None, + description="""The API key string. Either this or `api_key_secret_version` must be set.""", + ) -class ExternalApiSimpleSearchParams(_common.BaseModel): - """The search parameters to use for SIMPLE_SEARCH spec. - This data type is not supported in Gemini API. - """ +class ApiAuthApiKeyConfigDict(TypedDict, total=False): + """The API secret. This data type is not supported in Gemini API.""" - pass + api_key_secret_version: Optional[str] + """Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version}""" + + api_key_string: Optional[str] + """The API key string. Either this or `api_key_secret_version` must be set.""" + + +ApiAuthApiKeyConfigOrDict = Union[ApiAuthApiKeyConfig, ApiAuthApiKeyConfigDict] + + +class ApiAuth(_common.BaseModel): + """The generic reusable api auth config. + + Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) + instead. This data type is not supported in Gemini API. + """ + + api_key_config: Optional[ApiAuthApiKeyConfig] = Field( + default=None, description="""The API secret.""" + ) + + +class ApiAuthDict(TypedDict, total=False): + """The generic reusable api auth config. + + Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) + instead. This data type is not supported in Gemini API. + """ + + api_key_config: Optional[ApiAuthApiKeyConfigDict] + """The API secret.""" + + +ApiAuthOrDict = Union[ApiAuth, ApiAuthDict] + + +class ExternalApiElasticSearchParams(_common.BaseModel): + """The search parameters to use for the ELASTIC_SEARCH spec. + + This data type is not supported in Gemini API. + """ + + index: Optional[str] = Field( + default=None, description="""The ElasticSearch index to use.""" + ) + num_hits: Optional[int] = Field( + default=None, + description="""Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch as the `num_hits` param.""", + ) + search_template: Optional[str] = Field( + default=None, description="""The ElasticSearch search template to use.""" + ) + + +class ExternalApiElasticSearchParamsDict(TypedDict, total=False): + """The search parameters to use for the ELASTIC_SEARCH spec. + + This data type is not supported in Gemini API. + """ + + index: Optional[str] + """The ElasticSearch index to use.""" + + num_hits: Optional[int] + """Optional. Number of hits (chunks) to request. 
When specified, it is passed to Elasticsearch as the `num_hits` param.""" + + search_template: Optional[str] + """The ElasticSearch search template to use.""" + + +ExternalApiElasticSearchParamsOrDict = Union[ + ExternalApiElasticSearchParams, ExternalApiElasticSearchParamsDict +] + + +class ExternalApiSimpleSearchParams(_common.BaseModel): + """The search parameters to use for SIMPLE_SEARCH spec. + + This data type is not supported in Gemini API. + """ + + pass class ExternalApiSimpleSearchParamsDict(TypedDict, total=False): @@ -3998,14 +4123,14 @@ class EnterpriseWebSearch(_common.BaseModel): This data type is not supported in Gemini API. """ - exclude_domains: Optional[list[str]] = Field( - default=None, - description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""", - ) blocking_confidence: Optional[PhishBlockThreshold] = Field( default=None, description="""Optional. Sites with confidence level chosen & above this value will be blocked from the search results.""", ) + exclude_domains: Optional[list[str]] = Field( + default=None, + description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""", + ) class EnterpriseWebSearchDict(TypedDict, total=False): @@ -4014,40 +4139,14 @@ class EnterpriseWebSearchDict(TypedDict, total=False): This data type is not supported in Gemini API. """ - exclude_domains: Optional[list[str]] - """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""" - blocking_confidence: Optional[PhishBlockThreshold] """Optional. Sites with confidence level chosen & above this value will be blocked from the search results.""" - -EnterpriseWebSearchOrDict = Union[EnterpriseWebSearch, EnterpriseWebSearchDict] - - -class GoogleMaps(_common.BaseModel): - """Tool to retrieve public maps data for grounding, powered by Google.""" - - auth_config: Optional[AuthConfig] = Field( - default=None, - description="""The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""", - ) - enable_widget: Optional[bool] = Field( - default=None, - description="""Optional. If true, include the widget context token in the response.""", - ) - - -class GoogleMapsDict(TypedDict, total=False): - """Tool to retrieve public maps data for grounding, powered by Google.""" - - auth_config: Optional[AuthConfigDict] - """The authentication config to access the API. Only API key is supported. This field is not supported in Gemini API.""" - - enable_widget: Optional[bool] - """Optional. If true, include the widget context token in the response.""" + exclude_domains: Optional[list[str]] + """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains.""" -GoogleMapsOrDict = Union[GoogleMaps, GoogleMapsDict] +EnterpriseWebSearchOrDict = Union[EnterpriseWebSearch, EnterpriseWebSearchDict] class Interval(_common.BaseModel): @@ -4092,14 +4191,14 @@ class GoogleSearch(_common.BaseModel): Tool to support Google Search in Model. Powered by Google. """ - exclude_domains: Optional[list[str]] = Field( - default=None, - description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. This field is not supported in Gemini API.""", - ) blocking_confidence: Optional[PhishBlockThreshold] = Field( default=None, description="""Optional. 
Sites with confidence level chosen & above this value will be blocked from the search results. This field is not supported in Gemini API.""", ) + exclude_domains: Optional[list[str]] = Field( + default=None, + description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. This field is not supported in Gemini API.""", + ) time_range_filter: Optional[Interval] = Field( default=None, description="""Optional. Filter search results to a specific time range. If customers set a start time, they must set an end time (and vice versa). This field is not supported in Vertex AI.""", @@ -4112,12 +4211,12 @@ class GoogleSearchDict(TypedDict, total=False): Tool to support Google Search in Model. Powered by Google. """ - exclude_domains: Optional[list[str]] - """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. This field is not supported in Gemini API.""" - blocking_confidence: Optional[PhishBlockThreshold] """Optional. Sites with confidence level chosen & above this value will be blocked from the search results. This field is not supported in Gemini API.""" + exclude_domains: Optional[list[str]] + """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. This field is not supported in Gemini API.""" + time_range_filter: Optional[IntervalDict] """Optional. Filter search results to a specific time range. If customers set a start time, they must set an end time (and vice versa). This field is not supported in Vertex AI.""" @@ -4165,6 +4264,11 @@ class Tool(_common.BaseModel): default=None, description="""Optional. Tool to retrieve knowledge from the File Search Stores.""", ) + google_maps: Optional[GoogleMaps] = Field( + default=None, + description="""Optional. Tool that allows grounding the model's response with + geospatial context related to the user's query.""", + ) code_execution: Optional[ToolCodeExecution] = Field( default=None, description="""Optional. CodeExecution tool type. Enables the model to execute code as part of generation.""", @@ -4173,10 +4277,6 @@ class Tool(_common.BaseModel): default=None, description="""Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance. This field is not supported in Gemini API.""", ) - google_maps: Optional[GoogleMaps] = Field( - default=None, - description="""Optional. GoogleMaps tool type. Tool to support Google Maps in Model.""", - ) google_search: Optional[GoogleSearch] = Field( default=None, description="""Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google.""", @@ -4207,15 +4307,16 @@ class ToolDict(TypedDict, total=False): file_search: Optional[FileSearchDict] """Optional. Tool to retrieve knowledge from the File Search Stores.""" + google_maps: Optional[GoogleMapsDict] + """Optional. Tool that allows grounding the model's response with + geospatial context related to the user's query.""" + code_execution: Optional[ToolCodeExecutionDict] """Optional. CodeExecution tool type. Enables the model to execute code as part of generation.""" enterprise_web_search: Optional[EnterpriseWebSearchDict] """Optional. Tool to support searching public web data, powered by Vertex AI Search and Sec4 compliance. This field is not supported in Gemini API.""" - google_maps: Optional[GoogleMapsDict] - """Optional. 
GoogleMaps tool type. Tool to support Google Maps in Model.""" - google_search: Optional[GoogleSearchDict] """Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google.""" @@ -4442,7 +4543,7 @@ class ThinkingConfig(_common.BaseModel): ) thinking_level: Optional[ThinkingLevel] = Field( default=None, - description="""Optional. The level of thoughts tokens that the model should generate.""", + description="""Optional. The number of thoughts tokens that the model should generate.""", ) @@ -4458,12 +4559,46 @@ class ThinkingConfigDict(TypedDict, total=False): """ thinking_level: Optional[ThinkingLevel] - """Optional. The level of thoughts tokens that the model should generate.""" + """Optional. The number of thoughts tokens that the model should generate.""" ThinkingConfigOrDict = Union[ThinkingConfig, ThinkingConfigDict] +class ImageConfigImageOutputOptions(_common.BaseModel): + """The image output format for generated images. + + This data type is not supported in Gemini API. + """ + + compression_quality: Optional[int] = Field( + default=None, + description="""Optional. The compression quality of the output image.""", + ) + mime_type: Optional[str] = Field( + default=None, + description="""Optional. The image format that the output should be saved as.""", + ) + + +class ImageConfigImageOutputOptionsDict(TypedDict, total=False): + """The image output format for generated images. + + This data type is not supported in Gemini API. + """ + + compression_quality: Optional[int] + """Optional. The compression quality of the output image.""" + + mime_type: Optional[str] + """Optional. The image format that the output should be saved as.""" + + +ImageConfigImageOutputOptionsOrDict = Union[ + ImageConfigImageOutputOptions, ImageConfigImageOutputOptionsDict +] + + class ImageConfig(_common.BaseModel): """The image generation configuration to be used in GenerateContentConfig.""" @@ -4488,6 +4623,14 @@ class ImageConfig(_common.BaseModel): description="""Compression quality of the generated image (for ``image/jpeg`` only). This field is not supported in Gemini API.""", ) + image_output_options: Optional[ImageConfigImageOutputOptions] = Field( + default=None, + description="""Optional. The image output format for generated images. This field is not supported in Gemini API.""", + ) + person_generation: Optional[PersonGeneration] = Field( + default=None, + description="""Optional. Controls whether the model can generate people. This field is not supported in Gemini API.""", + ) class ImageConfigDict(TypedDict, total=False): @@ -4510,6 +4653,12 @@ class ImageConfigDict(TypedDict, total=False): """Compression quality of the generated image (for ``image/jpeg`` only). This field is not supported in Gemini API.""" + image_output_options: Optional[ImageConfigImageOutputOptionsDict] + """Optional. The image output format for generated images. This field is not supported in Gemini API.""" + + person_generation: Optional[PersonGeneration] + """Optional. Controls whether the model can generate people. This field is not supported in Gemini API.""" + ImageConfigOrDict = Union[ImageConfig, ImageConfigDict] @@ -4671,9 +4820,11 @@ class FileDict(TypedDict, total=False): class GenerationConfigRoutingConfigAutoRoutingMode(_common.BaseModel): - """When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference. + """The configuration for automated routing. - This data type is not supported in Gemini API. 
+ When automated routing is specified, the routing will be determined by the + pretrained routing model and customer provided model routing preference. This + data type is not supported in Gemini API. """ model_routing_preference: Optional[ @@ -4682,9 +4833,11 @@ class GenerationConfigRoutingConfigAutoRoutingMode(_common.BaseModel): class GenerationConfigRoutingConfigAutoRoutingModeDict(TypedDict, total=False): - """When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference. + """The configuration for automated routing. - This data type is not supported in Gemini API. + When automated routing is specified, the routing will be determined by the + pretrained routing model and customer provided model routing preference. This + data type is not supported in Gemini API. """ model_routing_preference: Optional[ @@ -4700,27 +4853,29 @@ class GenerationConfigRoutingConfigAutoRoutingModeDict(TypedDict, total=False): class GenerationConfigRoutingConfigManualRoutingMode(_common.BaseModel): - """When manual routing is set, the specified model will be used directly. + """The configuration for manual routing. - This data type is not supported in Gemini API. + When manual routing is specified, the model will be selected based on the + model name provided. This data type is not supported in Gemini API. """ model_name: Optional[str] = Field( default=None, - description="""The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).""", + description="""The name of the model to use. Only public LLM models are accepted.""", ) class GenerationConfigRoutingConfigManualRoutingModeDict( TypedDict, total=False ): - """When manual routing is set, the specified model will be used directly. + """The configuration for manual routing. - This data type is not supported in Gemini API. + When manual routing is specified, the model will be selected based on the + model name provided. This data type is not supported in Gemini API. """ model_name: Optional[str] - """The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).""" + """The name of the model to use. Only public LLM models are accepted.""" GenerationConfigRoutingConfigManualRoutingModeOrDict = Union[ @@ -4732,28 +4887,34 @@ class GenerationConfigRoutingConfigManualRoutingModeDict( class GenerationConfigRoutingConfig(_common.BaseModel): """The configuration for routing the request to a specific model. - This data type is not supported in Gemini API. + This can be used to control which model is used for the generation, either + automatically or by specifying a model name. This data type is not supported + in Gemini API. """ auto_mode: Optional[GenerationConfigRoutingConfigAutoRoutingMode] = Field( - default=None, description="""Automated routing.""" + default=None, + description="""In this mode, the model is selected automatically based on the content of the request.""", ) manual_mode: Optional[GenerationConfigRoutingConfigManualRoutingMode] = Field( - default=None, description="""Manual routing.""" + default=None, + description="""In this mode, the model is specified manually.""", ) class GenerationConfigRoutingConfigDict(TypedDict, total=False): """The configuration for routing the request to a specific model. 
- This data type is not supported in Gemini API. + This can be used to control which model is used for the generation, either + automatically or by specifying a model name. This data type is not supported + in Gemini API. """ auto_mode: Optional[GenerationConfigRoutingConfigAutoRoutingModeDict] - """Automated routing.""" + """In this mode, the model is selected automatically based on the content of the request.""" manual_mode: Optional[GenerationConfigRoutingConfigManualRoutingModeDict] - """Manual routing.""" + """In this mode, the model is specified manually.""" GenerationConfigRoutingConfigOrDict = Union[ @@ -4762,63 +4923,70 @@ class GenerationConfigRoutingConfigDict(TypedDict, total=False): class SafetySetting(_common.BaseModel): - """Safety settings.""" + """A safety setting that affects the safety-blocking behavior. + + A SafetySetting consists of a harm category and a threshold for that category. + """ category: Optional[HarmCategory] = Field( - default=None, description="""Required. Harm category.""" + default=None, description="""Required. The harm category to be blocked.""" ) method: Optional[HarmBlockMethod] = Field( default=None, - description="""Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. This field is not supported in Gemini API.""", + description="""Optional. The method for blocking content. If not specified, the default behavior is to use the probability score. This field is not supported in Gemini API.""", ) threshold: Optional[HarmBlockThreshold] = Field( - default=None, description="""Required. The harm block threshold.""" + default=None, + description="""Required. The threshold for blocking content. If the harm probability exceeds this threshold, the content will be blocked.""", ) class SafetySettingDict(TypedDict, total=False): - """Safety settings.""" + """A safety setting that affects the safety-blocking behavior. + + A SafetySetting consists of a harm category and a threshold for that category. + """ category: Optional[HarmCategory] - """Required. Harm category.""" + """Required. The harm category to be blocked.""" method: Optional[HarmBlockMethod] - """Optional. Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score. This field is not supported in Gemini API.""" + """Optional. The method for blocking content. If not specified, the default behavior is to use the probability score. This field is not supported in Gemini API.""" threshold: Optional[HarmBlockThreshold] - """Required. The harm block threshold.""" + """Required. The threshold for blocking content. If the harm probability exceeds this threshold, the content will be blocked.""" SafetySettingOrDict = Union[SafetySetting, SafetySettingDict] class SpeechConfig(_common.BaseModel): - """The speech generation config.""" + """Configuration for speech generation.""" language_code: Optional[str] = Field( default=None, - description="""Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.""", - ) - voice_config: Optional['VoiceConfig'] = Field( - default=None, description="""The configuration for the speaker to use.""" + description="""Optional. The language code (ISO 639-1) for the speech synthesis.""", ) multi_speaker_voice_config: Optional['MultiSpeakerVoiceConfig'] = Field( default=None, - description="""Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. 
This field is not supported in Vertex AI.""", + description="""The configuration for a multi-speaker text-to-speech request. This field is mutually exclusive with `voice_config`.""", + ) + voice_config: Optional['VoiceConfig'] = Field( + default=None, description="""The configuration for the voice to use.""" ) class SpeechConfigDict(TypedDict, total=False): - """The speech generation config.""" + """Configuration for speech generation.""" language_code: Optional[str] - """Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.""" - - voice_config: Optional['VoiceConfigDict'] - """The configuration for the speaker to use.""" + """Optional. The language code (ISO 639-1) for the speech synthesis.""" multi_speaker_voice_config: Optional['MultiSpeakerVoiceConfigDict'] - """Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI.""" + """The configuration for a multi-speaker text-to-speech request. This field is mutually exclusive with `voice_config`.""" + + voice_config: Optional['VoiceConfigDict'] + """The configuration for the voice to use.""" SpeechConfigOrDict = Union[SpeechConfig, SpeechConfigDict] @@ -5378,56 +5546,60 @@ class GoogleTypeDateDict(TypedDict, total=False): class Citation(_common.BaseModel): - """Source attributions for content. + """A citation for a piece of generated content. This data type is not supported in Gemini API. """ end_index: Optional[int] = Field( - default=None, description="""Output only. End index into the content.""" + default=None, + description="""Output only. The end index of the citation in the content.""", ) license: Optional[str] = Field( - default=None, description="""Output only. License of the attribution.""" + default=None, + description="""Output only. The license of the source of the citation.""", ) publication_date: Optional[GoogleTypeDate] = Field( default=None, - description="""Output only. Publication date of the attribution.""", + description="""Output only. The publication date of the source of the citation.""", ) start_index: Optional[int] = Field( - default=None, description="""Output only. Start index into the content.""" + default=None, + description="""Output only. The start index of the citation in the content.""", ) title: Optional[str] = Field( - default=None, description="""Output only. Title of the attribution.""" + default=None, + description="""Output only. The title of the source of the citation.""", ) uri: Optional[str] = Field( default=None, - description="""Output only. Url reference of the attribution.""", + description="""Output only. The URI of the source of the citation.""", ) class CitationDict(TypedDict, total=False): - """Source attributions for content. + """A citation for a piece of generated content. This data type is not supported in Gemini API. """ end_index: Optional[int] - """Output only. End index into the content.""" + """Output only. The end index of the citation in the content.""" license: Optional[str] - """Output only. License of the attribution.""" + """Output only. The license of the source of the citation.""" publication_date: Optional[GoogleTypeDateDict] - """Output only. Publication date of the attribution.""" + """Output only. The publication date of the source of the citation.""" start_index: Optional[int] - """Output only. Start index into the content.""" + """Output only. The start index of the citation in the content.""" title: Optional[str] - """Output only. Title of the attribution.""" + """Output only. The title of the source of the citation.""" uri: Optional[str] - """Output only. Url reference of the attribution.""" + """Output only. The URI of the source of the citation.""" CitationOrDict = Union[Citation, CitationDict] @@ -5466,10 +5638,7 @@ class CitationMetadataDict(TypedDict, total=False): class GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution(_common.BaseModel): - """Author attribution for a photo or review. - - This data type is not supported in Gemini API. - """ + """Author attribution for a photo or review.""" display_name: Optional[str] = Field( default=None, description="""Name of the author of the Photo or Review.""" ) @@ -5486,10 +5655,7 @@ class GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution(_common.BaseModel): class GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict( TypedDict, total=False ): - """Author attribution for a photo or review. - - This data type is not supported in Gemini API. - """ + """Author attribution for a photo or review.""" display_name: Optional[str] """Name of the author of the Photo or Review.""" @@ -5508,10 +5674,7 @@ class GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict( class GroundingChunkMapsPlaceAnswerSourcesReviewSnippet(_common.BaseModel): - """Encapsulates a review snippet. - - This data type is not supported in Gemini API. - """ + """Encapsulates a review snippet.""" author_attribution: Optional[ GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution ] = Field( @@ -5542,10 +5705,7 @@ class GroundingChunkMapsPlaceAnswerSourcesReviewSnippet(_common.BaseModel): class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict( TypedDict, total=False ): - """Encapsulates a review snippet. - - This data type is not supported in Gemini API. - """ + """Encapsulates a review snippet.""" author_attribution: Optional[ GroundingChunkMapsPlaceAnswerSourcesAuthorAttributionDict ] @@ -5578,11 +5738,18 @@ class GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict( class GroundingChunkMapsPlaceAnswerSources(_common.BaseModel): - """Sources used to generate the place answer. - - This data type is not supported in Gemini API. + """The sources that were used to generate the place answer. + + This includes review snippets and photos that were used to generate the + answer, as well as URIs to flag content.
""" + review_snippet: Optional[ + list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict] + ] + """Snippets of reviews that were used to generate the answer.""" + flag_content_uri: Optional[str] """A link where users can flag a problem with the generated answer.""" review_snippets: Optional[ list[GroundingChunkMapsPlaceAnswerSourcesReviewSnippetDict] ] - """Snippets of reviews that are used to generate the answer.""" + """Snippets of reviews that were used to generate the answer.""" GroundingChunkMapsPlaceAnswerSourcesOrDict = Union[ @@ -5617,44 +5790,62 @@ class GroundingChunkMapsPlaceAnswerSourcesDict(TypedDict, total=False): class GroundingChunkMaps(_common.BaseModel): - """Chunk from Google Maps. This data type is not supported in Gemini API.""" + """A `Maps` chunk is a piece of evidence that comes from Google Maps. + + It contains information about a place, such as its name, address, and reviews. + This is used to provide the user with rich, location-based information. + """ place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSources] = Field( default=None, - description="""Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content.""", + description="""The sources that were used to generate the place answer. + + This includes review snippets and photos that were used to generate the + answer, as well as URIs to flag content.""", ) place_id: Optional[str] = Field( default=None, - description="""This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place.""", + description="""This Place's resource name, in `places/{place_id}` format. + + This can be used to look up the place in the Google Maps API.""", ) text: Optional[str] = Field( - default=None, description="""Text of the place answer.""" + default=None, description="""The text of the place answer.""" ) title: Optional[str] = Field( - default=None, description="""Title of the place.""" + default=None, description="""The title of the place.""" ) uri: Optional[str] = Field( - default=None, description="""URI reference of the place.""" + default=None, description="""The URI of the place.""" ) class GroundingChunkMapsDict(TypedDict, total=False): - """Chunk from Google Maps. This data type is not supported in Gemini API.""" + """A `Maps` chunk is a piece of evidence that comes from Google Maps. + + It contains information about a place, such as its name, address, and reviews. + This is used to provide the user with rich, location-based information. + """ place_answer_sources: Optional[GroundingChunkMapsPlaceAnswerSourcesDict] - """Sources used to generate the place answer. This includes review snippets and photos that were used to generate the answer, as well as uris to flag content.""" + """The sources that were used to generate the place answer. + + This includes review snippets and photos that were used to generate the + answer, as well as URIs to flag content.""" place_id: Optional[str] - """This Place's resource name, in `places/{place_id}` format. Can be used to look up the Place.""" + """This Place's resource name, in `places/{place_id}` format. 
+ + This can be used to look up the place in the Google Maps API.""" text: Optional[str] - """Text of the place answer.""" + """The text of the place answer.""" title: Optional[str] - """Title of the place.""" + """The title of the place.""" uri: Optional[str] - """URI reference of the place.""" + """The URI of the place.""" GroundingChunkMapsOrDict = Union[GroundingChunkMaps, GroundingChunkMapsDict] @@ -5724,50 +5915,59 @@ class RagChunkDict(TypedDict, total=False): class GroundingChunkRetrievedContext(_common.BaseModel): - """Chunk from context retrieved by the retrieval tools. + """Context retrieved from a data source to ground the model's response. - This data type is not supported in Gemini API. + This is used when a retrieval tool fetches information from a user-provided + corpus or a public dataset. """ document_name: Optional[str] = Field( default=None, - description="""Output only. The full document name for the referenced Vertex AI Search document.""", + description="""Output only. The full resource name of the referenced Vertex AI Search document. This is used to identify the specific document that was retrieved. The format is `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document}`. This field is not supported in Gemini API.""", ) rag_chunk: Optional[RagChunk] = Field( default=None, - description="""Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool.""", + description="""Additional context for a Retrieval-Augmented Generation (RAG) retrieval result. This is populated only when the RAG retrieval tool is used. This field is not supported in Gemini API.""", ) text: Optional[str] = Field( - default=None, description="""Text of the attribution.""" + default=None, description="""The content of the retrieved data source.""" ) title: Optional[str] = Field( - default=None, description="""Title of the attribution.""" + default=None, description="""The title of the retrieved data source.""" ) uri: Optional[str] = Field( - default=None, description="""URI reference of the attribution.""" + default=None, description="""The URI of the retrieved data source.""" + ) + file_search_store: Optional[str] = Field( + default=None, + description="""Optional. Name of the `FileSearchStore` containing the document. Example: `fileSearchStores/123`. This field is not supported in Vertex AI.""", ) class GroundingChunkRetrievedContextDict(TypedDict, total=False): - """Chunk from context retrieved by the retrieval tools. + """Context retrieved from a data source to ground the model's response. - This data type is not supported in Gemini API. + This is used when a retrieval tool fetches information from a user-provided + corpus or a public dataset. """ document_name: Optional[str] - """Output only. The full document name for the referenced Vertex AI Search document.""" + """Output only. The full resource name of the referenced Vertex AI Search document. This is used to identify the specific document that was retrieved. The format is `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document}`. This field is not supported in Gemini API.""" rag_chunk: Optional[RagChunkDict] - """Additional context for the RAG retrieval result. This is only populated when using the RAG retrieval tool.""" + """Additional context for a Retrieval-Augmented Generation (RAG) retrieval result. 
This is populated only when the RAG retrieval tool is used. This field is not supported in Gemini API.""" text: Optional[str] - """Text of the attribution.""" + """The content of the retrieved data source.""" title: Optional[str] - """Title of the attribution.""" + """The title of the retrieved data source.""" uri: Optional[str] - """URI reference of the attribution.""" + """The URI of the retrieved data source.""" + + file_search_store: Optional[str] + """Optional. Name of the `FileSearchStore` containing the document. Example: `fileSearchStores/123`. This field is not supported in Vertex AI.""" GroundingChunkRetrievedContextOrDict = Union[ @@ -5776,103 +5976,136 @@ class GroundingChunkRetrievedContextDict(TypedDict, total=False): class GroundingChunkWeb(_common.BaseModel): - """Chunk from the web.""" + """A `Web` chunk is a piece of evidence that comes from a web page. + + It contains the URI of the web page, the title of the page, and the domain of + the page. This is used to provide the user with a link to the source of the + information. + """ domain: Optional[str] = Field( default=None, - description="""Domain of the (original) URI. This field is not supported in Gemini API.""", + description="""The domain of the web page that contains the evidence. This can be used to filter out low-quality sources. This field is not supported in Gemini API.""", ) title: Optional[str] = Field( - default=None, description="""Title of the chunk.""" + default=None, + description="""The title of the web page that contains the evidence.""", ) uri: Optional[str] = Field( - default=None, description="""URI reference of the chunk.""" + default=None, + description="""The URI of the web page that contains the evidence.""", ) class GroundingChunkWebDict(TypedDict, total=False): - """Chunk from the web.""" + """A `Web` chunk is a piece of evidence that comes from a web page. + + It contains the URI of the web page, the title of the page, and the domain of + the page. This is used to provide the user with a link to the source of the + information. + """ domain: Optional[str] - """Domain of the (original) URI. This field is not supported in Gemini API.""" + """The domain of the web page that contains the evidence. This can be used to filter out low-quality sources. This field is not supported in Gemini API.""" title: Optional[str] - """Title of the chunk.""" + """The title of the web page that contains the evidence.""" uri: Optional[str] - """URI reference of the chunk.""" + """The URI of the web page that contains the evidence.""" GroundingChunkWebOrDict = Union[GroundingChunkWeb, GroundingChunkWebDict] class GroundingChunk(_common.BaseModel): - """Grounding chunk.""" + """A chunk of evidence that was used to generate the response.""" maps: Optional[GroundingChunkMaps] = Field( default=None, - description="""Grounding chunk from Google Maps. This field is not supported in Gemini API.""", + description="""A `Maps` chunk is a piece of evidence that comes from Google Maps. + + It contains information about a place, such as its name, address, and + reviews. This is used to provide the user with rich, location-based + information.""", ) retrieved_context: Optional[GroundingChunkRetrievedContext] = Field( default=None, - description="""Grounding chunk from context retrieved by the retrieval tools. This field is not supported in Gemini API.""", + description="""A grounding chunk from a data source retrieved by a retrieval tool, such as Vertex AI Search. 
See the `RetrievedContext` message for details""", ) web: Optional[GroundingChunkWeb] = Field( - default=None, description="""Grounding chunk from the web.""" + default=None, + description="""A grounding chunk from a web page, typically from Google Search. See the `Web` message for details.""", ) class GroundingChunkDict(TypedDict, total=False): - """Grounding chunk.""" + """A chunk of evidence that was used to generate the response.""" maps: Optional[GroundingChunkMapsDict] - """Grounding chunk from Google Maps. This field is not supported in Gemini API.""" + """A `Maps` chunk is a piece of evidence that comes from Google Maps. + + It contains information about a place, such as its name, address, and + reviews. This is used to provide the user with rich, location-based + information.""" retrieved_context: Optional[GroundingChunkRetrievedContextDict] - """Grounding chunk from context retrieved by the retrieval tools. This field is not supported in Gemini API.""" + """A grounding chunk from a data source retrieved by a retrieval tool, such as Vertex AI Search. See the `RetrievedContext` message for details""" web: Optional[GroundingChunkWebDict] - """Grounding chunk from the web.""" + """A grounding chunk from a web page, typically from Google Search. See the `Web` message for details.""" GroundingChunkOrDict = Union[GroundingChunk, GroundingChunkDict] class Segment(_common.BaseModel): - """Segment of the content.""" + """Segment of the content this support belongs to.""" - end_index: Optional[int] = Field( + start_index: Optional[int] = Field( default=None, - description="""Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero.""", + description="""Output only. Start index in the given Part, measured in bytes. + + Offset from the start of the Part, inclusive, starting at zero.""", ) - part_index: Optional[int] = Field( + end_index: Optional[int] = Field( default=None, - description="""Output only. The index of a Part object within its parent Content object.""", + description="""Output only. End index in the given Part, measured in bytes. + + Offset from the start of the Part, exclusive, starting at zero.""", ) - start_index: Optional[int] = Field( + part_index: Optional[int] = Field( default=None, - description="""Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero.""", + description="""Output only. The index of a Part object within its parent + Content object.""", ) text: Optional[str] = Field( default=None, - description="""Output only. The text corresponding to the segment from the response.""", + description="""Output only. The text corresponding to the segment from the + response.""", ) class SegmentDict(TypedDict, total=False): - """Segment of the content.""" + """Segment of the content this support belongs to.""" + + start_index: Optional[int] + """Output only. Start index in the given Part, measured in bytes. + + Offset from the start of the Part, inclusive, starting at zero.""" end_index: Optional[int] - """Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero.""" + """Output only. End index in the given Part, measured in bytes. + + Offset from the start of the Part, exclusive, starting at zero.""" part_index: Optional[int] - """Output only. The index of a Part object within its parent Content object.""" - - start_index: Optional[int] - """Output only. 
Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero.""" + """Output only. The index of a Part object within its parent + Content object.""" text: Optional[str] - """Output only. The text corresponding to the segment from the response.""" + """Output only. The text corresponding to the segment from the + response.""" SegmentOrDict = Union[Segment, SegmentDict] @@ -5883,11 +6116,17 @@ class GroundingSupport(_common.BaseModel): confidence_scores: Optional[list[float]] = Field( default=None, - description="""Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored.""", + description="""Confidence score of the support references. + + Ranges from 0 to 1. 1 is the most confident. This list must have the + same size as the grounding_chunk_indices.""", ) grounding_chunk_indices: Optional[list[int]] = Field( default=None, - description="""A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.""", + description="""A list of indices (into 'grounding_chunk') specifying the + citations associated with the claim. For instance [1,3,4] means that + grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the + retrieved content attributed to the claim.""", ) segment: Optional[Segment] = Field( default=None, @@ -5899,10 +6138,16 @@ class GroundingSupportDict(TypedDict, total=False): """Grounding support.""" confidence_scores: Optional[list[float]] - """Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident. For Gemini 2.0 and before, this list must have the same size as the grounding_chunk_indices. For Gemini 2.5 and after, this list will be empty and should be ignored.""" + """Confidence score of the support references. + + Ranges from 0 to 1. 1 is the most confident. This list must have the + same size as the grounding_chunk_indices.""" grounding_chunk_indices: Optional[list[int]] - """A list of indices (into 'grounding_chunk') specifying the citations associated with the claim. For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.""" + """A list of indices (into 'grounding_chunk') specifying the + citations associated with the claim. For instance [1,3,4] means that + grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the + retrieved content attributed to the claim.""" segment: Optional[SegmentDict] """Segment of the content this support belongs to.""" @@ -5912,78 +6157,90 @@ class GroundingSupportDict(TypedDict, total=False): class RetrievalMetadata(_common.BaseModel): - """Metadata related to retrieval in the grounding flow.""" + """Metadata returned to client when grounding is enabled.""" google_search_dynamic_retrieval_score: Optional[float] = Field( default=None, - description="""Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. 
It will be compared to the threshold to determine whether to trigger Google Search.""", + description="""Optional. Score indicating how likely information from google + search could help answer the prompt. The score is in the range [0, 1], + where 0 is the least likely and 1 is the most likely. This score is only + populated when google search grounding and dynamic retrieval is enabled. + It will be compared to the threshold to determine whether to trigger + Google search.""", ) class RetrievalMetadataDict(TypedDict, total=False): - """Metadata related to retrieval in the grounding flow.""" + """Metadata returned to client when grounding is enabled.""" google_search_dynamic_retrieval_score: Optional[float] - """Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search.""" + """Optional. Score indicating how likely information from google + search could help answer the prompt. The score is in the range [0, 1], + where 0 is the least likely and 1 is the most likely. This score is only + populated when google search grounding and dynamic retrieval is enabled. + It will be compared to the threshold to determine whether to trigger + Google search.""" RetrievalMetadataOrDict = Union[RetrievalMetadata, RetrievalMetadataDict] class SearchEntryPoint(_common.BaseModel): - """Google search entry point.""" + """The entry point used to search for grounding sources.""" rendered_content: Optional[str] = Field( default=None, - description="""Optional. Web content snippet that can be embedded in a web page or an app webview.""", + description="""Optional. Web content snippet that can be embedded in a web page + or an app webview.""", ) sdk_blob: Optional[bytes] = Field( default=None, - description="""Optional. Base64 encoded JSON representing array of tuple.""", + description="""Optional. JSON representing array of tuples.""", ) class SearchEntryPointDict(TypedDict, total=False): - """Google search entry point.""" + """The entry point used to search for grounding sources.""" rendered_content: Optional[str] - """Optional. Web content snippet that can be embedded in a web page or an app webview.""" + """Optional. Web content snippet that can be embedded in a web page + or an app webview.""" sdk_blob: Optional[bytes] - """Optional. Base64 encoded JSON representing array of tuple.""" + """Optional. JSON representing array of tuples.""" SearchEntryPointOrDict = Union[SearchEntryPoint, SearchEntryPointDict] class GroundingMetadataSourceFlaggingUri(_common.BaseModel): - """Source content flagging uri for a place or review. + """A URI that can be used to flag a place or review for inappropriate content. - This is currently populated only for Google Maps grounding. This data type is - not supported in Gemini API. + This is populated only when the grounding source is Google Maps. This data + type is not supported in Gemini API. 
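Since the grounding chunk, support, segment, and retrieval types above all surface through `GroundingMetadata`, a short, hedged usage sketch may help; it assumes the Google Search tool and an illustrative model name, and only walks the fields documented in this file.

# Hedged sketch: enable Google Search grounding and walk the grounding metadata.
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.5-flash',  # illustrative model name
    contents='Who won the most recent FIFA World Cup?',
    config=types.GenerateContentConfig(
        tools=[types.Tool(google_search=types.GoogleSearch())],
    ),
)
metadata = response.candidates[0].grounding_metadata
if metadata:
    for chunk in metadata.grounding_chunks or []:
        if chunk.web:  # a `Web` chunk links back to the source page
            print(chunk.web.title, chunk.web.uri)
    for support in metadata.grounding_supports or []:
        # grounding_chunk_indices map each supported segment back to its sources.
        print(support.segment.text, support.grounding_chunk_indices)
    print(metadata.web_search_queries)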
""" flag_content_uri: Optional[str] = Field( default=None, - description="""A link where users can flag a problem with the source (place or review).""", + description="""The URI that can be used to flag the content.""", ) source_id: Optional[str] = Field( - default=None, description="""Id of the place or review.""" + default=None, description="""The ID of the place or review.""" ) class GroundingMetadataSourceFlaggingUriDict(TypedDict, total=False): - """Source content flagging uri for a place or review. + """A URI that can be used to flag a place or review for inappropriate content. - This is currently populated only for Google Maps grounding. This data type is - not supported in Gemini API. + This is populated only when the grounding source is Google Maps. This data + type is not supported in Gemini API. """ flag_content_uri: Optional[str] - """A link where users can flag a problem with the source (place or review).""" + """The URI that can be used to flag the content.""" source_id: Optional[str] - """Id of the place or review.""" + """The ID of the place or review.""" GroundingMetadataSourceFlaggingUriOrDict = Union[ @@ -5994,96 +6251,103 @@ class GroundingMetadataSourceFlaggingUriDict(TypedDict, total=False): class GroundingMetadata(_common.BaseModel): """Metadata returned to client when grounding is enabled.""" - google_maps_widget_context_token: Optional[str] = Field( - default=None, - description="""Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding. This field is not supported in Gemini API.""", - ) grounding_chunks: Optional[list[GroundingChunk]] = Field( default=None, - description="""List of supporting references retrieved from specified grounding source.""", + description="""List of supporting references retrieved from specified grounding + source.""", ) grounding_supports: Optional[list[GroundingSupport]] = Field( - default=None, description="""Optional. List of grounding support.""" + default=None, description="""List of grounding support.""" ) retrieval_metadata: Optional[RetrievalMetadata] = Field( - default=None, description="""Optional. Output only. Retrieval metadata.""" - ) - retrieval_queries: Optional[list[str]] = Field( default=None, - description="""Optional. Queries executed by the retrieval tools. This field is not supported in Gemini API.""", + description="""Metadata related to retrieval in the grounding flow.""", ) search_entry_point: Optional[SearchEntryPoint] = Field( default=None, - description="""Optional. Google search entry for the following-up web searches.""", + description="""Optional. Google search entry for the following-up web + searches.""", + ) + web_search_queries: Optional[list[str]] = Field( + default=None, + description="""Web search queries for the following-up web search.""", + ) + google_maps_widget_context_token: Optional[str] = Field( + default=None, + description="""Optional. Output only. A token that can be used to render a Google Maps widget with the contextual data. This field is populated only when the grounding source is Google Maps.""", + ) + retrieval_queries: Optional[list[str]] = Field( + default=None, + description="""Optional. The queries that were executed by the retrieval tools. This field is populated only when the grounding source is a retrieval tool, such as Vertex AI Search. 
This field is not supported in Gemini API.""", ) source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUri]] = ( Field( default=None, - description="""Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding. This field is not supported in Gemini API.""", + description="""Optional. Output only. A list of URIs that can be used to flag a place or review for inappropriate content. This field is populated only when the grounding source is Google Maps. This field is not supported in Gemini API.""", ) ) - web_search_queries: Optional[list[str]] = Field( - default=None, - description="""Optional. Web search queries for the following-up web search.""", - ) class GroundingMetadataDict(TypedDict, total=False): """Metadata returned to client when grounding is enabled.""" - google_maps_widget_context_token: Optional[str] - """Optional. Output only. Resource name of the Google Maps widget context token to be used with the PlacesContextElement widget to render contextual data. This is populated only for Google Maps grounding. This field is not supported in Gemini API.""" - grounding_chunks: Optional[list[GroundingChunkDict]] - """List of supporting references retrieved from specified grounding source.""" + """List of supporting references retrieved from specified grounding + source.""" grounding_supports: Optional[list[GroundingSupportDict]] - """Optional. List of grounding support.""" + """List of grounding support.""" retrieval_metadata: Optional[RetrievalMetadataDict] - """Optional. Output only. Retrieval metadata.""" - - retrieval_queries: Optional[list[str]] - """Optional. Queries executed by the retrieval tools. This field is not supported in Gemini API.""" + """Metadata related to retrieval in the grounding flow.""" search_entry_point: Optional[SearchEntryPointDict] - """Optional. Google search entry for the following-up web searches.""" - - source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUriDict]] - """Optional. Output only. List of source flagging uris. This is currently populated only for Google Maps grounding. This field is not supported in Gemini API.""" + """Optional. Google search entry for the following-up web + searches.""" web_search_queries: Optional[list[str]] - """Optional. Web search queries for the following-up web search.""" + """Web search queries for the following-up web search.""" + + google_maps_widget_context_token: Optional[str] + """Optional. Output only. A token that can be used to render a Google Maps widget with the contextual data. This field is populated only when the grounding source is Google Maps.""" + + retrieval_queries: Optional[list[str]] + """Optional. The queries that were executed by the retrieval tools. This field is populated only when the grounding source is a retrieval tool, such as Vertex AI Search. This field is not supported in Gemini API.""" + + source_flagging_uris: Optional[list[GroundingMetadataSourceFlaggingUriDict]] + """Optional. Output only. A list of URIs that can be used to flag a place or review for inappropriate content. This field is populated only when the grounding source is Google Maps. 
This field is not supported in Gemini API.""" GroundingMetadataOrDict = Union[GroundingMetadata, GroundingMetadataDict] class LogprobsResultCandidate(_common.BaseModel): - """Candidate for the logprobs token and score.""" + """A single token and its associated log probability.""" log_probability: Optional[float] = Field( - default=None, description="""The candidate's log probability.""" + default=None, + description="""The log probability of this token. A higher value indicates that the model was more confident in this token. The log probability can be used to assess the relative likelihood of different tokens and to identify when the model is uncertain.""", ) token: Optional[str] = Field( - default=None, description="""The candidate's token string value.""" + default=None, description="""The token's string representation.""" ) token_id: Optional[int] = Field( - default=None, description="""The candidate's token id value.""" + default=None, + description="""The token's numerical ID. While the `token` field provides the string representation of the token, the `token_id` is the numerical representation that the model uses internally. This can be useful for developers who want to build custom logic based on the model's vocabulary.""", ) class LogprobsResultCandidateDict(TypedDict, total=False): - """Candidate for the logprobs token and score.""" + """A single token and its associated log probability.""" log_probability: Optional[float] - """The candidate's log probability.""" + """The log probability of this token. A higher value indicates that the model was more confident in this token. The log probability can be used to assess the relative likelihood of different tokens and to identify when the model is uncertain.""" token: Optional[str] - """The candidate's token string value.""" + """The token's string representation.""" token_id: Optional[int] - """The candidate's token id value.""" + """The token's numerical ID. While the `token` field provides the string representation of the token, the `token_id` is the numerical representation that the model uses internally. This can be useful for developers who want to build custom logic based on the model's vocabulary.""" LogprobsResultCandidateOrDict = Union[ @@ -6092,19 +6356,25 @@ class LogprobsResultCandidateDict(TypedDict, total=False): class LogprobsResultTopCandidates(_common.BaseModel): - """Candidates with top log probabilities at each decoding step.""" + """A list of the top candidate tokens and their log probabilities at each decoding step. + + This can be used to see what other tokens the model considered. + """ candidates: Optional[list[LogprobsResultCandidate]] = Field( default=None, - description="""Sorted by log probability in descending order.""", + description="""The list of candidate tokens, sorted by log probability in descending order.""", ) class LogprobsResultTopCandidatesDict(TypedDict, total=False): - """Candidates with top log probabilities at each decoding step.""" + """A list of the top candidate tokens and their log probabilities at each decoding step. + + This can be used to see what other tokens the model considered. 
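A brief, hedged sketch of how these logprobs types are typically consumed follows; it relies on the `response_logprobs` and `logprobs` generation-config fields documented later in this file, and the model name is an assumption for the example.

# Hedged sketch: request log probabilities and inspect the LogprobsResult.
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.5-flash',  # illustrative model name
    contents='Complete the sentence: The capital of France is',
    config=types.GenerateContentConfig(
        response_logprobs=True,
        logprobs=3,  # also return the top 3 candidate tokens per decoding step
    ),
)
result = response.candidates[0].logprobs_result
if result:
    for chosen, step in zip(result.chosen_candidates or [], result.top_candidates or []):
        alternatives = [(c.token, c.log_probability) for c in step.candidates or []]
        print(chosen.token, chosen.log_probability, alternatives)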
+ """ candidates: Optional[list[LogprobsResultCandidateDict]] - """Sorted by log probability in descending order.""" + """The list of candidate tokens, sorted by log probability in descending order.""" LogprobsResultTopCandidatesOrDict = Union[ @@ -6113,39 +6383,67 @@ class LogprobsResultTopCandidatesDict(TypedDict, total=False): class LogprobsResult(_common.BaseModel): - """Logprobs Result""" + """The log probabilities of the tokens generated by the model. + + This is useful for understanding the model's confidence in its predictions and + for debugging. For example, you can use log probabilities to identify when the + model is making a less confident prediction or to explore alternative + responses that the model considered. A low log probability can also indicate + that the model is "hallucinating" or generating factually incorrect + information. + """ chosen_candidates: Optional[list[LogprobsResultCandidate]] = Field( default=None, - description="""Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.""", + description="""A list of the chosen candidate tokens at each decoding step. The length of this list is equal to the total number of decoding steps. Note that the chosen candidate might not be in `top_candidates`.""", ) top_candidates: Optional[list[LogprobsResultTopCandidates]] = Field( - default=None, description="""Length = total number of decoding steps.""" + default=None, + description="""A list of the top candidate tokens at each decoding step. The length of this list is equal to the total number of decoding steps.""", + ) + log_probability_sum: Optional[float] = Field( + default=None, + description="""Sum of log probabilities for all tokens. This field is not supported in Vertex AI.""", ) class LogprobsResultDict(TypedDict, total=False): - """Logprobs Result""" + """The log probabilities of the tokens generated by the model. + + This is useful for understanding the model's confidence in its predictions and + for debugging. For example, you can use log probabilities to identify when the + model is making a less confident prediction or to explore alternative + responses that the model considered. A low log probability can also indicate + that the model is "hallucinating" or generating factually incorrect + information. + """ chosen_candidates: Optional[list[LogprobsResultCandidateDict]] - """Length = total number of decoding steps. The chosen candidates may or may not be in top_candidates.""" + """A list of the chosen candidate tokens at each decoding step. The length of this list is equal to the total number of decoding steps. Note that the chosen candidate might not be in `top_candidates`.""" top_candidates: Optional[list[LogprobsResultTopCandidatesDict]] - """Length = total number of decoding steps.""" + """A list of the top candidate tokens at each decoding step. The length of this list is equal to the total number of decoding steps.""" + + log_probability_sum: Optional[float] + """Sum of log probabilities for all tokens. This field is not supported in Vertex AI.""" LogprobsResultOrDict = Union[LogprobsResult, LogprobsResultDict] class SafetyRating(_common.BaseModel): - """Safety rating corresponding to the generated content.""" + """A safety rating for a piece of content. + + The safety rating contains the harm category and the harm probability level. + """ blocked: Optional[bool] = Field( default=None, - description="""Output only. 
Indicates whether the content was filtered out because of this rating.""", + description="""Output only. Indicates whether the content was blocked because of this rating.""", ) category: Optional[HarmCategory] = Field( - default=None, description="""Output only. Harm category.""" + default=None, + description="""Output only. The harm category of this rating.""", ) overwritten_threshold: Optional[HarmBlockThreshold] = Field( default=None, @@ -6153,87 +6451,91 @@ class SafetyRating(_common.BaseModel): ) probability: Optional[HarmProbability] = Field( default=None, - description="""Output only. Harm probability levels in the content.""", + description="""Output only. The probability of harm for this category.""", ) probability_score: Optional[float] = Field( default=None, - description="""Output only. Harm probability score. This field is not supported in Gemini API.""", + description="""Output only. The probability score of harm for this category. This field is not supported in Gemini API.""", ) severity: Optional[HarmSeverity] = Field( default=None, - description="""Output only. Harm severity levels in the content. This field is not supported in Gemini API.""", + description="""Output only. The severity of harm for this category. This field is not supported in Gemini API.""", ) severity_score: Optional[float] = Field( default=None, - description="""Output only. Harm severity score. This field is not supported in Gemini API.""", + description="""Output only. The severity score of harm for this category. This field is not supported in Gemini API.""", ) class SafetyRatingDict(TypedDict, total=False): - """Safety rating corresponding to the generated content.""" + """A safety rating for a piece of content. + + The safety rating contains the harm category and the harm probability level. + """ blocked: Optional[bool] - """Output only. Indicates whether the content was filtered out because of this rating.""" + """Output only. Indicates whether the content was blocked because of this rating.""" category: Optional[HarmCategory] - """Output only. Harm category.""" + """Output only. The harm category of this rating.""" overwritten_threshold: Optional[HarmBlockThreshold] """Output only. The overwritten threshold for the safety category of Gemini 2.0 image out. If minors are detected in the output image, the threshold of each safety category will be overwritten if user sets a lower threshold. This field is not supported in Gemini API.""" probability: Optional[HarmProbability] - """Output only. Harm probability levels in the content.""" + """Output only. The probability of harm for this category.""" probability_score: Optional[float] - """Output only. Harm probability score. This field is not supported in Gemini API.""" + """Output only. The probability score of harm for this category. This field is not supported in Gemini API.""" severity: Optional[HarmSeverity] - """Output only. Harm severity levels in the content. This field is not supported in Gemini API.""" + """Output only. The severity of harm for this category. This field is not supported in Gemini API.""" severity_score: Optional[float] - """Output only. Harm severity score. This field is not supported in Gemini API.""" + """Output only. The severity score of harm for this category. 
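As an illustration of how the rating fields above are read in practice, a minimal, hedged sketch follows; the request itself is illustrative and only fields documented here are accessed.

# Hedged sketch: inspect per-category safety ratings on a candidate.
from google import genai

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.5-flash',  # illustrative model name
    contents='Tell me a short story about a dragon.',
)
for rating in response.candidates[0].safety_ratings or []:
    # probability_score, severity, and severity_score are not populated by the Gemini API.
    print(rating.category, rating.probability, rating.blocked)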
This field is not supported in Gemini API.""" SafetyRatingOrDict = Union[SafetyRating, SafetyRatingDict] class UrlMetadata(_common.BaseModel): - """Context of the a single url retrieval.""" + """The metadata for a single URL retrieval.""" retrieved_url: Optional[str] = Field( - default=None, description="""Retrieved url by the tool.""" + default=None, description="""The URL retrieved by the tool.""" ) url_retrieval_status: Optional[UrlRetrievalStatus] = Field( - default=None, description="""Status of the url retrieval.""" + default=None, description="""The status of the URL retrieval.""" ) class UrlMetadataDict(TypedDict, total=False): - """Context of the a single url retrieval.""" + """The metadata for a single URL retrieval.""" retrieved_url: Optional[str] - """Retrieved url by the tool.""" + """The URL retrieved by the tool.""" url_retrieval_status: Optional[UrlRetrievalStatus] - """Status of the url retrieval.""" + """The status of the URL retrieval.""" UrlMetadataOrDict = Union[UrlMetadata, UrlMetadataDict] class UrlContextMetadata(_common.BaseModel): - """Metadata related to url context retrieval tool.""" + """Metadata returned when the model uses the `url_context` tool to get information from a user-provided URL.""" url_metadata: Optional[list[UrlMetadata]] = Field( - default=None, description="""Output only. List of url context.""" + default=None, + description="""Output only. A list of URL metadata, with one entry for each URL retrieved by the tool.""", ) class UrlContextMetadataDict(TypedDict, total=False): - """Metadata related to url context retrieval tool.""" + """Metadata returned when the model uses the `url_context` tool to get information from a user-provided URL.""" url_metadata: Optional[list[UrlMetadataDict]] - """Output only. List of url context.""" + """Output only. A list of URL metadata, with one entry for each URL retrieved by the tool.""" UrlContextMetadataOrDict = Union[UrlContextMetadata, UrlContextMetadataDict] @@ -6268,28 +6570,32 @@ class Candidate(_common.BaseModel): If empty, the model has not stopped generating the tokens. """, ) - avg_logprobs: Optional[float] = Field( + grounding_metadata: Optional[GroundingMetadata] = Field( default=None, - description="""Output only. Average log probability score of the candidate.""", + description="""Output only. Grounding metadata for the candidate. + + This field is populated for `GenerateContent` calls. + """, ) - grounding_metadata: Optional[GroundingMetadata] = Field( + avg_logprobs: Optional[float] = Field( default=None, - description="""Output only. Metadata specifies sources used to ground generated content.""", + description="""Output only. The average log probability of the tokens in this candidate. This is a length-normalized score that can be used to compare the quality of candidates of different lengths. A higher average log probability suggests a more confident and coherent response.""", ) index: Optional[int] = Field( - default=None, description="""Output only. Index of the candidate.""" + default=None, + description="""Output only. The 0-based index of this candidate in the list of generated responses. This is useful for distinguishing between multiple candidates when `candidate_count` > 1.""", ) logprobs_result: Optional[LogprobsResult] = Field( default=None, - description="""Output only. Log-likelihood scores for the response tokens and top tokens""", + description="""Output only. The detailed log probability information for the tokens in this candidate. 
This is useful for debugging, understanding model uncertainty, and identifying potential "hallucinations".""", ) safety_ratings: Optional[list[SafetyRating]] = Field( default=None, - description="""Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.""", + description="""Output only. A list of ratings for the safety of a response candidate. There is at most one rating per category.""", ) url_context_metadata: Optional[UrlContextMetadata] = Field( default=None, - description="""Output only. Metadata related to url context retrieval tool.""", + description="""Output only. Metadata returned when the model uses the `url_context` tool to get information from a user-provided URL.""", ) @@ -6317,23 +6623,26 @@ class CandidateDict(TypedDict, total=False): If empty, the model has not stopped generating the tokens. """ - avg_logprobs: Optional[float] - """Output only. Average log probability score of the candidate.""" - grounding_metadata: Optional[GroundingMetadataDict] - """Output only. Metadata specifies sources used to ground generated content.""" + """Output only. Grounding metadata for the candidate. + + This field is populated for `GenerateContent` calls. + """ + + avg_logprobs: Optional[float] + """Output only. The average log probability of the tokens in this candidate. This is a length-normalized score that can be used to compare the quality of candidates of different lengths. A higher average log probability suggests a more confident and coherent response.""" index: Optional[int] - """Output only. Index of the candidate.""" + """Output only. The 0-based index of this candidate in the list of generated responses. This is useful for distinguishing between multiple candidates when `candidate_count` > 1.""" logprobs_result: Optional[LogprobsResultDict] - """Output only. Log-likelihood scores for the response tokens and top tokens""" + """Output only. The detailed log probability information for the tokens in this candidate. This is useful for debugging, understanding model uncertainty, and identifying potential "hallucinations".""" safety_ratings: Optional[list[SafetyRatingDict]] - """Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.""" + """Output only. A list of ratings for the safety of a response candidate. There is at most one rating per category.""" url_context_metadata: Optional[UrlContextMetadataDict] - """Output only. Metadata related to url context retrieval tool.""" + """Output only. 
Metadata returned when the model uses the `url_context` tool to get information from a user-provided URL.""" CandidateOrDict = Union[Candidate, CandidateDict] @@ -6391,7 +6700,8 @@ class ModalityTokenCount(_common.BaseModel): description="""The modality associated with this token count.""", ) token_count: Optional[int] = Field( - default=None, description="""Number of tokens.""" + default=None, + description="""The number of tokens counted for this modality.""", ) @@ -6402,7 +6712,7 @@ class ModalityTokenCountDict(TypedDict, total=False): """The modality associated with this token count.""" token_count: Optional[int] - """Number of tokens.""" + """The number of tokens counted for this modality.""" ModalityTokenCountOrDict = Union[ModalityTokenCount, ModalityTokenCountDict] @@ -9069,44 +9379,83 @@ class DeleteModelResponseDict(TypedDict, total=False): class PrebuiltVoiceConfig(_common.BaseModel): - """The configuration for the prebuilt speaker to use.""" + """Configuration for a prebuilt voice.""" voice_name: Optional[str] = Field( - default=None, description="""The name of the preset voice to use.""" + default=None, description="""The name of the prebuilt voice to use.""" ) class PrebuiltVoiceConfigDict(TypedDict, total=False): - """The configuration for the prebuilt speaker to use.""" + """Configuration for a prebuilt voice.""" voice_name: Optional[str] - """The name of the preset voice to use.""" + """The name of the prebuilt voice to use.""" PrebuiltVoiceConfigOrDict = Union[PrebuiltVoiceConfig, PrebuiltVoiceConfigDict] +class ReplicatedVoiceConfig(_common.BaseModel): + """The configuration for the replicated voice to use. + + This data type is not supported in Gemini API. + """ + + mime_type: Optional[str] = Field( + default=None, + description="""Optional. The mimetype of the voice sample. Currently only mime_type=audio/pcm is supported, which is raw mono 16-bit signed little-endian pcm data, with 24k sampling rate.""", + ) + voice_sample_audio: Optional[bytes] = Field( + default=None, description="""Optional. The sample of the custom voice.""" + ) + + +class ReplicatedVoiceConfigDict(TypedDict, total=False): + """The configuration for the replicated voice to use. + + This data type is not supported in Gemini API. + """ + + mime_type: Optional[str] + """Optional. The mimetype of the voice sample. Currently only mime_type=audio/pcm is supported, which is raw mono 16-bit signed little-endian pcm data, with 24k sampling rate.""" + + voice_sample_audio: Optional[bytes] + """Optional. The sample of the custom voice.""" + + +ReplicatedVoiceConfigOrDict = Union[ + ReplicatedVoiceConfig, ReplicatedVoiceConfigDict +] + + class VoiceConfig(_common.BaseModel): - """The configuration for the voice to use.""" + """Configuration for a voice.""" prebuilt_voice_config: Optional[PrebuiltVoiceConfig] = Field( + default=None, description="""The configuration for a prebuilt voice.""" + ) + replicated_voice_config: Optional[ReplicatedVoiceConfig] = Field( default=None, - description="""The configuration for the prebuilt voice to use.""", + description="""Optional. The configuration for a replicated voice. This enables users to replicate a voice from an audio sample. 
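Because `VoiceConfig` and `PrebuiltVoiceConfig` are normally supplied through the `SpeechConfig` documented earlier, a hedged audio-output sketch follows; the TTS model name and voice name are assumptions for the example, and the multi-speaker variant applies only where `MultiSpeakerVoiceConfig` (described below) is supported.

# Hedged sketch: request audio output with a prebuilt voice.
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.5-flash-preview-tts',  # illustrative TTS model name
    contents='Say cheerfully: have a wonderful day!',
    config=types.GenerateContentConfig(
        response_modalities=['AUDIO'],
        speech_config=types.SpeechConfig(
            voice_config=types.VoiceConfig(
                prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name='Kore'),
            ),
        ),
    ),
)
audio_bytes = response.candidates[0].content.parts[0].inline_data.data

For a two-speaker prompt, `speech_config=types.SpeechConfig(multi_speaker_voice_config=...)` with exactly two `SpeakerVoiceConfig` entries would be used instead, per the `MultiSpeakerVoiceConfig` description below.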
This field is not supported in Gemini API.""", ) class VoiceConfigDict(TypedDict, total=False): - """The configuration for the voice to use.""" + """Configuration for a voice.""" prebuilt_voice_config: Optional[PrebuiltVoiceConfigDict] - """The configuration for the prebuilt voice to use.""" + """The configuration for a prebuilt voice.""" + + replicated_voice_config: Optional[ReplicatedVoiceConfigDict] + """Optional. The configuration for a replicated voice. This enables users to replicate a voice from an audio sample. This field is not supported in Gemini API.""" VoiceConfigOrDict = Union[VoiceConfig, VoiceConfigDict] class SpeakerVoiceConfig(_common.BaseModel): - """Configuration for a single speaker in a multi speaker setup.""" + """Configuration for a single speaker in a multi-speaker setup.""" speaker: Optional[str] = Field( default=None, @@ -9119,7 +9468,7 @@ class SpeakerVoiceConfig(_common.BaseModel): class SpeakerVoiceConfigDict(TypedDict, total=False): - """Configuration for a single speaker in a multi speaker setup.""" + """Configuration for a single speaker in a multi-speaker setup.""" speaker: Optional[str] """Required. The name of the speaker. This should be the same as the speaker name used in the prompt.""" @@ -9132,24 +9481,19 @@ class SpeakerVoiceConfigDict(TypedDict, total=False): class MultiSpeakerVoiceConfig(_common.BaseModel): - """The configuration for the multi-speaker setup. - - This data type is not supported in Vertex AI. - """ + """Configuration for a multi-speaker text-to-speech request.""" speaker_voice_configs: Optional[list[SpeakerVoiceConfig]] = Field( - default=None, description="""Required. All the enabled speaker voices.""" + default=None, + description="""Required. A list of configurations for the voices of the speakers. Exactly two speaker voice configurations must be provided.""", ) class MultiSpeakerVoiceConfigDict(TypedDict, total=False): - """The configuration for the multi-speaker setup. - - This data type is not supported in Vertex AI. - """ + """Configuration for a multi-speaker text-to-speech request.""" speaker_voice_configs: Optional[list[SpeakerVoiceConfigDict]] - """Required. All the enabled speaker voices.""" + """Required. A list of configurations for the voices of the speakers. Exactly two speaker voice configurations must be provided.""" MultiSpeakerVoiceConfigOrDict = Union[ @@ -9171,74 +9515,82 @@ class GenerationConfig(_common.BaseModel): ) audio_timestamp: Optional[bool] = Field( default=None, - description="""Optional. If enabled, audio timestamp will be included in the request to the model. This field is not supported in Gemini API.""", + description="""Optional. If enabled, audio timestamps will be included in the request to the model. This can be useful for synchronizing audio with other modalities in the response. This field is not supported in Gemini API.""", ) candidate_count: Optional[int] = Field( default=None, - description="""Optional. Number of candidates to generate.""", + description="""Optional. The number of candidate responses to generate. A higher `candidate_count` can provide more options to choose from, but it also consumes more resources. This can be useful for generating a variety of responses and selecting the best one.""", ) enable_affective_dialog: Optional[bool] = Field( default=None, - description="""Optional. If enabled, the model will detect emotions and adapt its responses accordingly. This field is not supported in Gemini API.""", + description="""Optional. 
If enabled, the model will detect emotions and adapt its responses accordingly. For example, if the model detects that the user is frustrated, it may provide a more empathetic response. This field is not supported in Gemini API.""", ) frequency_penalty: Optional[float] = Field( - default=None, description="""Optional. Frequency penalties.""" + default=None, + description="""Optional. Penalizes tokens based on their frequency in the generated text. A positive value helps to reduce the repetition of words and phrases. Valid values can range from [-2.0, 2.0].""", ) logprobs: Optional[int] = Field( - default=None, description="""Optional. Logit probabilities.""" + default=None, + description="""Optional. The number of top log probabilities to return for each token. This can be used to see which other tokens were considered likely candidates for a given position. A higher value will return more options, but it will also increase the size of the response.""", ) max_output_tokens: Optional[int] = Field( default=None, - description="""Optional. The maximum number of output tokens to generate per message.""", + description="""Optional. The maximum number of tokens to generate in the response. A token is approximately four characters. The default value varies by model. This parameter can be used to control the length of the generated text and prevent overly long responses.""", ) media_resolution: Optional[MediaResolution] = Field( default=None, - description="""Optional. If specified, the media resolution specified will be used.""", + description="""Optional. The token resolution at which input media content is sampled. This is used to control the trade-off between the quality of the response and the number of tokens used to represent the media. A higher resolution allows the model to perceive more detail, which can lead to a more nuanced response, but it will also use more tokens. This does not affect the image dimensions sent to the model.""", ) presence_penalty: Optional[float] = Field( - default=None, description="""Optional. Positive penalties.""" + default=None, + description="""Optional. Penalizes tokens that have already appeared in the generated text. A positive value encourages the model to generate more diverse and less repetitive text. Valid values can range from [-2.0, 2.0].""", ) response_logprobs: Optional[bool] = Field( default=None, - description="""Optional. If true, export the logprobs results in response.""", + description="""Optional. If set to true, the log probabilities of the output tokens are returned. Log probabilities are the logarithm of the probability of a token appearing in the output. A higher log probability means the token is more likely to be generated. This can be useful for analyzing the model's confidence in its own output and for debugging.""", ) response_mime_type: Optional[str] = Field( default=None, - description="""Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature.""", + description="""Optional. The IANA standard MIME type of the response. The model will generate output that conforms to this MIME type. Supported values include 'text/plain' (default) and 'application/json'. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. 
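Because `response_mime_type` and `response_schema` are meant to be used together, a brief, hedged structured-output sketch follows; the dict-style schema and model name are illustrative assumptions, not part of this change.

# Hedged sketch: constrain the response to JSON with response_schema.
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.5-flash',  # illustrative model name
    contents='List two primary colors.',
    config=types.GenerateContentConfig(
        response_mime_type='application/json',
        response_schema={
            'type': 'ARRAY',
            'items': {'type': 'STRING'},
        },
    ),
)
print(response.text)  # expected to be a JSON array of strings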
This is a preview feature.""", ) response_modalities: Optional[list[Modality]] = Field( - default=None, description="""Optional. The modalities of the response.""" + default=None, + description="""Optional. The modalities of the response. The model will generate a response that includes all the specified modalities. For example, if this is set to `[TEXT, IMAGE]`, the response will include both text and an image.""", ) response_schema: Optional[Schema] = Field( default=None, - description="""Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response.""", + description="""Optional. Lets you specify a schema for the model's response, ensuring that the output conforms to a particular structure. This is useful for generating structured data such as JSON. The schema is a subset of the [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). When this field is set, you must also set the `response_mime_type` to `application/json`.""", ) routing_config: Optional[GenerationConfigRoutingConfig] = Field( default=None, description="""Optional. Routing configuration. This field is not supported in Gemini API.""", ) - seed: Optional[int] = Field(default=None, description="""Optional. Seed.""") + seed: Optional[int] = Field( + default=None, + description="""Optional. A seed for the random number generator. By setting a seed, you can make the model's output mostly deterministic. For a given prompt and parameters (like temperature, top_p, etc.), the model will produce the same response every time. However, it's not a guaranteed absolute deterministic behavior. This is different from parameters like `temperature`, which control the *level* of randomness. `seed` ensures that the "random" choices the model makes are the same on every run, making it essential for testing and ensuring reproducible results.""", + ) speech_config: Optional[SpeechConfig] = Field( default=None, description="""Optional. The speech generation config.""" ) stop_sequences: Optional[list[str]] = Field( - default=None, description="""Optional. Stop sequences.""" + default=None, + description="""Optional. A list of character sequences that will stop the model from generating further tokens. If a stop sequence is generated, the output will end at that point. This is useful for controlling the length and structure of the output. For example, you can use ["\n", "###"] to stop generation at a new line or a specific marker.""", ) temperature: Optional[float] = Field( default=None, - description="""Optional. Controls the randomness of predictions.""", + description="""Optional. Controls the randomness of the output. A higher temperature results in more creative and diverse responses, while a lower temperature makes the output more predictable and focused. The valid range is (0.0, 2.0].""", ) thinking_config: Optional[ThinkingConfig] = Field( default=None, - description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""", + description="""Optional. Configuration for thinking features. 
An error will be returned if this field is set for models that don't support thinking.""", ) top_k: Optional[float] = Field( default=None, - description="""Optional. If specified, top-k sampling will be used.""", + description="""Optional. Specifies the top-k sampling threshold. The model considers only the top k most probable tokens for the next token. This can be useful for generating more coherent and less random text. For example, a `top_k` of 40 means the model will choose the next word from the 40 most likely words.""", ) top_p: Optional[float] = Field( default=None, - description="""Optional. If specified, nucleus sampling will be used.""", + description="""Optional. Specifies the nucleus sampling threshold. The model considers only the smallest set of tokens whose cumulative probability is at least `top_p`. This helps generate more diverse and less repetitive responses. For example, a `top_p` of 0.9 means the model considers tokens until the cumulative probability of the tokens to select from reaches 0.9. It's recommended to adjust either temperature or `top_p`, but not both.""", ) enable_enhanced_civic_answers: Optional[bool] = Field( default=None, @@ -9258,64 +9610,64 @@ class GenerationConfigDict(TypedDict, total=False): """ audio_timestamp: Optional[bool] - """Optional. If enabled, audio timestamp will be included in the request to the model. This field is not supported in Gemini API.""" + """Optional. If enabled, audio timestamps will be included in the request to the model. This can be useful for synchronizing audio with other modalities in the response. This field is not supported in Gemini API.""" candidate_count: Optional[int] - """Optional. Number of candidates to generate.""" + """Optional. The number of candidate responses to generate. A higher `candidate_count` can provide more options to choose from, but it also consumes more resources. This can be useful for generating a variety of responses and selecting the best one.""" enable_affective_dialog: Optional[bool] - """Optional. If enabled, the model will detect emotions and adapt its responses accordingly. This field is not supported in Gemini API.""" + """Optional. If enabled, the model will detect emotions and adapt its responses accordingly. For example, if the model detects that the user is frustrated, it may provide a more empathetic response. This field is not supported in Gemini API.""" frequency_penalty: Optional[float] - """Optional. Frequency penalties.""" + """Optional. Penalizes tokens based on their frequency in the generated text. A positive value helps to reduce the repetition of words and phrases. Valid values can range from [-2.0, 2.0].""" logprobs: Optional[int] - """Optional. Logit probabilities.""" + """Optional. The number of top log probabilities to return for each token. This can be used to see which other tokens were considered likely candidates for a given position. A higher value will return more options, but it will also increase the size of the response.""" max_output_tokens: Optional[int] - """Optional. The maximum number of output tokens to generate per message.""" + """Optional. The maximum number of tokens to generate in the response. A token is approximately four characters. The default value varies by model. This parameter can be used to control the length of the generated text and prevent overly long responses.""" media_resolution: Optional[MediaResolution] - """Optional. If specified, the media resolution specified will be used.""" + """Optional. 
The token resolution at which input media content is sampled. This is used to control the trade-off between the quality of the response and the number of tokens used to represent the media. A higher resolution allows the model to perceive more detail, which can lead to a more nuanced response, but it will also use more tokens. This does not affect the image dimensions sent to the model.""" presence_penalty: Optional[float] - """Optional. Positive penalties.""" + """Optional. Penalizes tokens that have already appeared in the generated text. A positive value encourages the model to generate more diverse and less repetitive text. Valid values can range from [-2.0, 2.0].""" response_logprobs: Optional[bool] - """Optional. If true, export the logprobs results in response.""" + """Optional. If set to true, the log probabilities of the output tokens are returned. Log probabilities are the logarithm of the probability of a token appearing in the output. A higher log probability means the token is more likely to be generated. This can be useful for analyzing the model's confidence in its own output and for debugging.""" response_mime_type: Optional[str] - """Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature.""" + """Optional. The IANA standard MIME type of the response. The model will generate output that conforms to this MIME type. Supported values include 'text/plain' (default) and 'application/json'. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature.""" response_modalities: Optional[list[Modality]] - """Optional. The modalities of the response.""" + """Optional. The modalities of the response. The model will generate a response that includes all the specified modalities. For example, if this is set to `[TEXT, IMAGE]`, the response will include both text and an image.""" response_schema: Optional[SchemaDict] - """Optional. The `Schema` object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response.""" + """Optional. Lets you specify a schema for the model's response, ensuring that the output conforms to a particular structure. This is useful for generating structured data such as JSON. The schema is a subset of the [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). When this field is set, you must also set the `response_mime_type` to `application/json`.""" routing_config: Optional[GenerationConfigRoutingConfigDict] """Optional. Routing configuration. This field is not supported in Gemini API.""" seed: Optional[int] - """Optional. Seed.""" + """Optional. A seed for the random number generator. By setting a seed, you can make the model's output mostly deterministic. For a given prompt and parameters (like temperature, top_p, etc.), the model will typically produce the same response. However, absolute determinism is not guaranteed. 
This is different from parameters like `temperature`, which control the *level* of randomness. `seed` ensures that the "random" choices the model makes are the same on every run, making it essential for testing and ensuring reproducible results.""" speech_config: Optional[SpeechConfigDict] """Optional. The speech generation config.""" stop_sequences: Optional[list[str]] - """Optional. Stop sequences.""" + """Optional. A list of character sequences that will stop the model from generating further tokens. If a stop sequence is generated, the output will end at that point. This is useful for controlling the length and structure of the output. For example, you can use ["\n", "###"] to stop generation at a new line or a specific marker.""" temperature: Optional[float] - """Optional. Controls the randomness of predictions.""" + """Optional. Controls the randomness of the output. A higher temperature results in more creative and diverse responses, while a lower temperature makes the output more predictable and focused. The valid range is (0.0, 2.0].""" thinking_config: Optional[ThinkingConfigDict] - """Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""" + """Optional. Configuration for thinking features. An error will be returned if this field is set for models that don't support thinking.""" top_k: Optional[float] - """Optional. If specified, top-k sampling will be used.""" + """Optional. Specifies the top-k sampling threshold. The model considers only the top k most probable tokens for the next token. This can be useful for generating more coherent and less random text. For example, a `top_k` of 40 means the model will choose the next word from the 40 most likely words.""" top_p: Optional[float] - """Optional. If specified, nucleus sampling will be used.""" + """Optional. Specifies the nucleus sampling threshold. The model considers only the smallest set of tokens whose cumulative probability is at least `top_p`. This helps generate more diverse and less repetitive responses. For example, a `top_p` of 0.9 means the model considers tokens until the cumulative probability of the tokens to select from reaches 0.9. It's recommended to adjust either temperature or `top_p`, but not both.""" enable_enhanced_civic_answers: Optional[bool] """Optional. Enables enhanced civic answers. It may not be available for all models. This field is not supported in Vertex AI.""" @@ -10887,6 +11239,14 @@ class DatasetStats(_common.BaseModel): This data type is not supported in Gemini API. """ + dropped_example_indices: Optional[list[int]] = Field( + default=None, + description="""Output only. A partial sample of the indices (starting from 1) of the dropped examples.""", + ) + dropped_example_reasons: Optional[list[str]] = Field( + default=None, + description="""Output only. For each index in `dropped_example_indices`, the user-facing reason why the example was dropped.""", + ) total_billable_character_count: Optional[int] = Field( default=None, description="""Output only. Number of billable characters in the tuning dataset.""", @@ -10927,6 +11287,12 @@ class DatasetStatsDict(TypedDict, total=False): This data type is not supported in Gemini API. """ + dropped_example_indices: Optional[list[int]] + """Output only. A partial sample of the indices (starting from 1) of the dropped examples.""" + + dropped_example_reasons: Optional[list[str]] + """Output only. 
For each index in `dropped_example_indices`, the user-facing reason why the example was dropped.""" + total_billable_character_count: Optional[int] """Output only. Number of billable characters in the tuning dataset.""" @@ -10956,8 +11322,9 @@ class DatasetStatsDict(TypedDict, total=False): class DistillationDataStats(_common.BaseModel): - """Statistics computed for datasets used for distillation. + """Statistics for distillation prompt dataset. + These statistics do not include the responses sampled from the teacher model. This data type is not supported in Gemini API. """ @@ -10968,8 +11335,9 @@ class DistillationDataStats(_common.BaseModel): class DistillationDataStatsDict(TypedDict, total=False): - """Statistics computed for datasets used for distillation. + """Statistics for distillation prompt dataset. + These statistics do not include the responses sampled from the teacher model. This data type is not supported in Gemini API. """ @@ -11054,6 +11422,14 @@ class PreferenceOptimizationDataStats(_common.BaseModel): This data type is not supported in Gemini API. """ + dropped_example_indices: Optional[list[int]] = Field( + default=None, + description="""Output only. A partial sample of the indices (starting from 1) of the dropped examples.""", + ) + dropped_example_reasons: Optional[list[str]] = Field( + default=None, + description="""Output only. For each index in `dropped_example_indices`, the user-facing reason why the example was dropped.""", + ) score_variance_per_example_distribution: Optional[DatasetDistribution] = ( Field( default=None, @@ -11096,6 +11472,12 @@ class PreferenceOptimizationDataStatsDict(TypedDict, total=False): This data type is not supported in Gemini API. """ + dropped_example_indices: Optional[list[int]] + """Output only. A partial sample of the indices (starting from 1) of the dropped examples.""" + + dropped_example_reasons: Optional[list[str]] + """Output only. For each index in `dropped_example_indices`, the user-facing reason why the example was dropped.""" + score_variance_per_example_distribution: Optional[DatasetDistributionDict] """Output only. Dataset distributions for scores variance per example.""" @@ -11196,284 +11578,815 @@ class SupervisedTuningDatasetDistribution(_common.BaseModel): default=None, description="""Output only. The median of the values in the population.""", ) - min: Optional[float] = Field( - default=None, - description="""Output only. The minimum of the population values.""", + min: Optional[float] = Field( + default=None, + description="""Output only. The minimum of the population values.""", + ) + p5: Optional[float] = Field( + default=None, + description="""Output only. The 5th percentile of the values in the population.""", + ) + p95: Optional[float] = Field( + default=None, + description="""Output only. The 95th percentile of the values in the population.""", + ) + sum: Optional[int] = Field( + default=None, + description="""Output only. Sum of a given population of values.""", + ) + + +class SupervisedTuningDatasetDistributionDict(TypedDict, total=False): + """Dataset distribution for Supervised Tuning. + + This data type is not supported in Gemini API. + """ + + billable_sum: Optional[int] + """Output only. Sum of a given population of values that are billable.""" + + buckets: Optional[list[SupervisedTuningDatasetDistributionDatasetBucketDict]] + """Output only. Defines the histogram bucket.""" + + max: Optional[float] + """Output only. The maximum of the population values.""" + + mean: Optional[float] + """Output only. 
The arithmetic mean of the values in the population.""" + + median: Optional[float] + """Output only. The median of the values in the population.""" + + min: Optional[float] + """Output only. The minimum of the population values.""" + + p5: Optional[float] + """Output only. The 5th percentile of the values in the population.""" + + p95: Optional[float] + """Output only. The 95th percentile of the values in the population.""" + + sum: Optional[int] + """Output only. Sum of a given population of values.""" + + +SupervisedTuningDatasetDistributionOrDict = Union[ + SupervisedTuningDatasetDistribution, SupervisedTuningDatasetDistributionDict +] + + +class SupervisedTuningDataStats(_common.BaseModel): + """Tuning data statistics for Supervised Tuning. + + This data type is not supported in Gemini API. + """ + + dropped_example_reasons: Optional[list[str]] = Field( + default=None, + description="""Output only. For each index in `truncated_example_indices`, the user-facing reason why the example was dropped.""", + ) + total_billable_character_count: Optional[int] = Field( + default=None, + description="""Output only. Number of billable characters in the tuning dataset.""", + ) + total_billable_token_count: Optional[int] = Field( + default=None, + description="""Output only. Number of billable tokens in the tuning dataset.""", + ) + total_truncated_example_count: Optional[int] = Field( + default=None, + description="""Output only. The number of examples in the dataset that have been dropped. An example can be dropped for reasons including: too many tokens, contains an invalid image, contains too many images, etc.""", + ) + total_tuning_character_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tuning characters in the tuning dataset.""", + ) + truncated_example_indices: Optional[list[int]] = Field( + default=None, + description="""Output only. A partial sample of the indices (starting from 1) of the dropped examples.""", + ) + tuning_dataset_example_count: Optional[int] = Field( + default=None, + description="""Output only. Number of examples in the tuning dataset.""", + ) + tuning_step_count: Optional[int] = Field( + default=None, + description="""Output only. Number of tuning steps for this Tuning Job.""", + ) + user_dataset_examples: Optional[list[Content]] = Field( + default=None, + description="""Output only. Sample user messages in the training dataset uri.""", + ) + user_input_token_distribution: Optional[ + SupervisedTuningDatasetDistribution + ] = Field( + default=None, + description="""Output only. Dataset distributions for the user input tokens.""", + ) + user_message_per_example_distribution: Optional[ + SupervisedTuningDatasetDistribution + ] = Field( + default=None, + description="""Output only. Dataset distributions for the messages per example.""", + ) + user_output_token_distribution: Optional[ + SupervisedTuningDatasetDistribution + ] = Field( + default=None, + description="""Output only. Dataset distributions for the user output tokens.""", + ) + + +class SupervisedTuningDataStatsDict(TypedDict, total=False): + """Tuning data statistics for Supervised Tuning. + + This data type is not supported in Gemini API. + """ + + dropped_example_reasons: Optional[list[str]] + """Output only. For each index in `truncated_example_indices`, the user-facing reason why the example was dropped.""" + + total_billable_character_count: Optional[int] + """Output only. 
Number of billable characters in the tuning dataset.""" + + total_billable_token_count: Optional[int] + """Output only. Number of billable tokens in the tuning dataset.""" + + total_truncated_example_count: Optional[int] + """Output only. The number of examples in the dataset that have been dropped. An example can be dropped for reasons including: too many tokens, contains an invalid image, contains too many images, etc.""" + + total_tuning_character_count: Optional[int] + """Output only. Number of tuning characters in the tuning dataset.""" + + truncated_example_indices: Optional[list[int]] + """Output only. A partial sample of the indices (starting from 1) of the dropped examples.""" + + tuning_dataset_example_count: Optional[int] + """Output only. Number of examples in the tuning dataset.""" + + tuning_step_count: Optional[int] + """Output only. Number of tuning steps for this Tuning Job.""" + + user_dataset_examples: Optional[list[ContentDict]] + """Output only. Sample user messages in the training dataset uri.""" + + user_input_token_distribution: Optional[ + SupervisedTuningDatasetDistributionDict + ] + """Output only. Dataset distributions for the user input tokens.""" + + user_message_per_example_distribution: Optional[ + SupervisedTuningDatasetDistributionDict + ] + """Output only. Dataset distributions for the messages per example.""" + + user_output_token_distribution: Optional[ + SupervisedTuningDatasetDistributionDict + ] + """Output only. Dataset distributions for the user output tokens.""" + + +SupervisedTuningDataStatsOrDict = Union[ + SupervisedTuningDataStats, SupervisedTuningDataStatsDict +] + + +class TuningDataStats(_common.BaseModel): + """The tuning data statistic values for TuningJob. + + This data type is not supported in Gemini API. + """ + + distillation_data_stats: Optional[DistillationDataStats] = Field( + default=None, + description="""Output only. Statistics for distillation prompt dataset. These statistics do not include the responses sampled from the teacher model.""", + ) + preference_optimization_data_stats: Optional[ + PreferenceOptimizationDataStats + ] = Field( + default=None, + description="""Output only. Statistics for preference optimization.""", + ) + supervised_tuning_data_stats: Optional[SupervisedTuningDataStats] = Field( + default=None, description="""The SFT Tuning data stats.""" + ) + + +class TuningDataStatsDict(TypedDict, total=False): + """The tuning data statistic values for TuningJob. + + This data type is not supported in Gemini API. + """ + + distillation_data_stats: Optional[DistillationDataStatsDict] + """Output only. Statistics for distillation prompt dataset. These statistics do not include the responses sampled from the teacher model.""" + + preference_optimization_data_stats: Optional[ + PreferenceOptimizationDataStatsDict + ] + """Output only. Statistics for preference optimization.""" + + supervised_tuning_data_stats: Optional[SupervisedTuningDataStatsDict] + """The SFT Tuning data stats.""" + + +TuningDataStatsOrDict = Union[TuningDataStats, TuningDataStatsDict] + + +class EncryptionSpec(_common.BaseModel): + """Represents a customer-managed encryption key spec that can be applied to a top-level resource. + + This data type is not supported in Gemini API. + """ + + kms_key_name: Optional[str] = Field( + default=None, + description="""Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. 
Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""", + ) + + +class EncryptionSpecDict(TypedDict, total=False): + """Represents a customer-managed encryption key spec that can be applied to a top-level resource. + + This data type is not supported in Gemini API. + """ + + kms_key_name: Optional[str] + """Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""" + + +EncryptionSpecOrDict = Union[EncryptionSpec, EncryptionSpecDict] + + +class PartnerModelTuningSpec(_common.BaseModel): + """Tuning spec for Partner models. + + This data type is not supported in Gemini API. + """ + + hyper_parameters: Optional[dict[str, Any]] = Field( + default=None, + description="""Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""", + ) + training_dataset_uri: Optional[str] = Field( + default=None, + description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + validation_dataset_uri: Optional[str] = Field( + default=None, + description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + ) + + +class PartnerModelTuningSpecDict(TypedDict, total=False): + """Tuning spec for Partner models. + + This data type is not supported in Gemini API. + """ + + hyper_parameters: Optional[dict[str, Any]] + """Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""" + + training_dataset_uri: Optional[str] + """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""" + + validation_dataset_uri: Optional[str] + """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""" + + +PartnerModelTuningSpecOrDict = Union[ + PartnerModelTuningSpec, PartnerModelTuningSpecDict +] + + +class BleuMetricValue(_common.BaseModel): + """Bleu metric value for an instance. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] = Field( + default=None, description="""Output only. Bleu score.""" + ) + + +class BleuMetricValueDict(TypedDict, total=False): + """Bleu metric value for an instance. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] + """Output only. Bleu score.""" + + +BleuMetricValueOrDict = Union[BleuMetricValue, BleuMetricValueDict] + + +class CustomCodeExecutionResult(_common.BaseModel): + """Result for custom code execution metric. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] = Field( + default=None, description="""Output only. Custom code execution score.""" + ) + + +class CustomCodeExecutionResultDict(TypedDict, total=False): + """Result for custom code execution metric. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] + """Output only. 
Custom code execution score.""" + + +CustomCodeExecutionResultOrDict = Union[ + CustomCodeExecutionResult, CustomCodeExecutionResultDict +] + + +class ExactMatchMetricValue(_common.BaseModel): + """Exact match metric value for an instance. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] = Field( + default=None, description="""Output only. Exact match score.""" + ) + + +class ExactMatchMetricValueDict(TypedDict, total=False): + """Exact match metric value for an instance. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] + """Output only. Exact match score.""" + + +ExactMatchMetricValueOrDict = Union[ + ExactMatchMetricValue, ExactMatchMetricValueDict +] + + +class RawOutput(_common.BaseModel): + """Raw output. This data type is not supported in Gemini API.""" + + raw_output: Optional[list[str]] = Field( + default=None, description="""Output only. Raw output string.""" + ) + + +class RawOutputDict(TypedDict, total=False): + """Raw output. This data type is not supported in Gemini API.""" + + raw_output: Optional[list[str]] + """Output only. Raw output string.""" + + +RawOutputOrDict = Union[RawOutput, RawOutputDict] + + +class CustomOutput(_common.BaseModel): + """Spec for custom output. This data type is not supported in Gemini API.""" + + raw_outputs: Optional[RawOutput] = Field( + default=None, description="""Output only. List of raw output strings.""" + ) + + +class CustomOutputDict(TypedDict, total=False): + """Spec for custom output. This data type is not supported in Gemini API.""" + + raw_outputs: Optional[RawOutputDict] + """Output only. List of raw output strings.""" + + +CustomOutputOrDict = Union[CustomOutput, CustomOutputDict] + + +class PairwiseMetricResult(_common.BaseModel): + """Spec for pairwise metric result. + + This data type is not supported in Gemini API. + """ + + custom_output: Optional[CustomOutput] = Field( + default=None, description="""Output only. Spec for custom output.""" + ) + explanation: Optional[str] = Field( + default=None, + description="""Output only. Explanation for pairwise metric score.""", + ) + pairwise_choice: Optional[PairwiseChoice] = Field( + default=None, description="""Output only. Pairwise metric choice.""" + ) + + +class PairwiseMetricResultDict(TypedDict, total=False): + """Spec for pairwise metric result. + + This data type is not supported in Gemini API. + """ + + custom_output: Optional[CustomOutputDict] + """Output only. Spec for custom output.""" + + explanation: Optional[str] + """Output only. Explanation for pairwise metric score.""" + + pairwise_choice: Optional[PairwiseChoice] + """Output only. Pairwise metric choice.""" + + +PairwiseMetricResultOrDict = Union[ + PairwiseMetricResult, PairwiseMetricResultDict +] + + +class PointwiseMetricResult(_common.BaseModel): + """Spec for pointwise metric result. + + This data type is not supported in Gemini API. + """ + + custom_output: Optional[CustomOutput] = Field( + default=None, description="""Output only. Spec for custom output.""" + ) + explanation: Optional[str] = Field( + default=None, + description="""Output only. Explanation for pointwise metric score.""", + ) + score: Optional[float] = Field( + default=None, description="""Output only. Pointwise metric score.""" + ) + + +class PointwiseMetricResultDict(TypedDict, total=False): + """Spec for pointwise metric result. + + This data type is not supported in Gemini API. + """ + + custom_output: Optional[CustomOutputDict] + """Output only. 
Spec for custom output.""" + + explanation: Optional[str] + """Output only. Explanation for pointwise metric score.""" + + score: Optional[float] + """Output only. Pointwise metric score.""" + + +PointwiseMetricResultOrDict = Union[ + PointwiseMetricResult, PointwiseMetricResultDict +] + + +class RougeMetricValue(_common.BaseModel): + """Rouge metric value for an instance. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] = Field( + default=None, description="""Output only. Rouge score.""" + ) + + +class RougeMetricValueDict(TypedDict, total=False): + """Rouge metric value for an instance. + + This data type is not supported in Gemini API. + """ + + score: Optional[float] + """Output only. Rouge score.""" + + +RougeMetricValueOrDict = Union[RougeMetricValue, RougeMetricValueDict] + + +class AggregationResult(_common.BaseModel): + """The aggregation result for a single metric. + + This data type is not supported in Gemini API. + """ + + aggregation_metric: Optional[AggregationMetric] = Field( + default=None, description="""Aggregation metric.""" + ) + bleu_metric_value: Optional[BleuMetricValue] = Field( + default=None, description="""Results for bleu metric.""" + ) + custom_code_execution_result: Optional[CustomCodeExecutionResult] = Field( + default=None, description="""Result for code execution metric.""" ) - p5: Optional[float] = Field( - default=None, - description="""Output only. The 5th percentile of the values in the population.""", + exact_match_metric_value: Optional[ExactMatchMetricValue] = Field( + default=None, description="""Results for exact match metric.""" ) - p95: Optional[float] = Field( - default=None, - description="""Output only. The 95th percentile of the values in the population.""", + pairwise_metric_result: Optional[PairwiseMetricResult] = Field( + default=None, description="""Result for pairwise metric.""" ) - sum: Optional[int] = Field( - default=None, - description="""Output only. Sum of a given population of values.""", + pointwise_metric_result: Optional[PointwiseMetricResult] = Field( + default=None, description="""Result for pointwise metric.""" + ) + rouge_metric_value: Optional[RougeMetricValue] = Field( + default=None, description="""Results for rouge metric.""" ) -class SupervisedTuningDatasetDistributionDict(TypedDict, total=False): - """Dataset distribution for Supervised Tuning. +class AggregationResultDict(TypedDict, total=False): + """The aggregation result for a single metric. This data type is not supported in Gemini API. """ - billable_sum: Optional[int] - """Output only. Sum of a given population of values that are billable.""" - - buckets: Optional[list[SupervisedTuningDatasetDistributionDatasetBucketDict]] - """Output only. Defines the histogram bucket.""" - - max: Optional[float] - """Output only. The maximum of the population values.""" + aggregation_metric: Optional[AggregationMetric] + """Aggregation metric.""" - mean: Optional[float] - """Output only. The arithmetic mean of the values in the population.""" + bleu_metric_value: Optional[BleuMetricValueDict] + """Results for bleu metric.""" - median: Optional[float] - """Output only. The median of the values in the population.""" + custom_code_execution_result: Optional[CustomCodeExecutionResultDict] + """Result for code execution metric.""" - min: Optional[float] - """Output only. 
The minimum of the population values.""" + exact_match_metric_value: Optional[ExactMatchMetricValueDict] + """Results for exact match metric.""" - p5: Optional[float] - """Output only. The 5th percentile of the values in the population.""" + pairwise_metric_result: Optional[PairwiseMetricResultDict] + """Result for pairwise metric.""" - p95: Optional[float] - """Output only. The 95th percentile of the values in the population.""" + pointwise_metric_result: Optional[PointwiseMetricResultDict] + """Result for pointwise metric.""" - sum: Optional[int] - """Output only. Sum of a given population of values.""" + rouge_metric_value: Optional[RougeMetricValueDict] + """Results for rouge metric.""" -SupervisedTuningDatasetDistributionOrDict = Union[ - SupervisedTuningDatasetDistribution, SupervisedTuningDatasetDistributionDict -] +AggregationResultOrDict = Union[AggregationResult, AggregationResultDict] -class SupervisedTuningDataStats(_common.BaseModel): - """Tuning data statistics for Supervised Tuning. +class BigQuerySource(_common.BaseModel): + """The BigQuery location for the input content. This data type is not supported in Gemini API. """ - dropped_example_reasons: Optional[list[str]] = Field( - default=None, - description="""Output only. For each index in `truncated_example_indices`, the user-facing reason why the example was dropped.""", - ) - total_billable_character_count: Optional[int] = Field( - default=None, - description="""Output only. Number of billable characters in the tuning dataset.""", - ) - total_billable_token_count: Optional[int] = Field( - default=None, - description="""Output only. Number of billable tokens in the tuning dataset.""", - ) - total_truncated_example_count: Optional[int] = Field( - default=None, - description="""Output only. The number of examples in the dataset that have been dropped. An example can be dropped for reasons including: too many tokens, contains an invalid image, contains too many images, etc.""", - ) - total_tuning_character_count: Optional[int] = Field( - default=None, - description="""Output only. Number of tuning characters in the tuning dataset.""", - ) - truncated_example_indices: Optional[list[int]] = Field( - default=None, - description="""Output only. A partial sample of the indices (starting from 1) of the dropped examples.""", - ) - tuning_dataset_example_count: Optional[int] = Field( + input_uri: Optional[str] = Field( default=None, - description="""Output only. Number of examples in the tuning dataset.""", + description="""Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`.""", ) - tuning_step_count: Optional[int] = Field( + + +class BigQuerySourceDict(TypedDict, total=False): + """The BigQuery location for the input content. + + This data type is not supported in Gemini API. + """ + + input_uri: Optional[str] + """Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`.""" + + +BigQuerySourceOrDict = Union[BigQuerySource, BigQuerySourceDict] + + +class GcsSource(_common.BaseModel): + """The Google Cloud Storage location for the input content. + + This data type is not supported in Gemini API. + """ + + uris: Optional[list[str]] = Field( default=None, - description="""Output only. Number of tuning steps for this Tuning Job.""", + description="""Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. 
For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.""", ) - user_dataset_examples: Optional[list[Content]] = Field( - default=None, - description="""Output only. Sample user messages in the training dataset uri.""", + + +class GcsSourceDict(TypedDict, total=False): + """The Google Cloud Storage location for the input content. + + This data type is not supported in Gemini API. + """ + + uris: Optional[list[str]] + """Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/wildcards.""" + + +GcsSourceOrDict = Union[GcsSource, GcsSourceDict] + + +class EvaluationDataset(_common.BaseModel): + """The dataset used for evaluation. + + This data type is not supported in Gemini API. + """ + + bigquery_source: Optional[BigQuerySource] = Field( + default=None, description="""BigQuery source holds the dataset.""" ) - user_input_token_distribution: Optional[ - SupervisedTuningDatasetDistribution - ] = Field( + gcs_source: Optional[GcsSource] = Field( default=None, - description="""Output only. Dataset distributions for the user input tokens.""", + description="""Cloud storage source holds the dataset. Currently only one Cloud Storage file path is supported.""", ) - user_message_per_example_distribution: Optional[ - SupervisedTuningDatasetDistribution - ] = Field( - default=None, - description="""Output only. Dataset distributions for the messages per example.""", + + +class EvaluationDatasetDict(TypedDict, total=False): + """The dataset used for evaluation. + + This data type is not supported in Gemini API. + """ + + bigquery_source: Optional[BigQuerySourceDict] + """BigQuery source holds the dataset.""" + + gcs_source: Optional[GcsSourceDict] + """Cloud storage source holds the dataset. Currently only one Cloud Storage file path is supported.""" + + +EvaluationDatasetOrDict = Union[EvaluationDataset, EvaluationDatasetDict] + + +class AggregationOutput(_common.BaseModel): + """The aggregation result for the entire dataset and all metrics. + + This data type is not supported in Gemini API. + """ + + aggregation_results: Optional[list[AggregationResult]] = Field( + default=None, description="""One AggregationResult per metric.""" ) - user_output_token_distribution: Optional[ - SupervisedTuningDatasetDistribution - ] = Field( + dataset: Optional[EvaluationDataset] = Field( default=None, - description="""Output only. Dataset distributions for the user output tokens.""", + description="""The dataset used for evaluation & aggregation.""", ) -class SupervisedTuningDataStatsDict(TypedDict, total=False): - """Tuning data statistics for Supervised Tuning. +class AggregationOutputDict(TypedDict, total=False): + """The aggregation result for the entire dataset and all metrics. This data type is not supported in Gemini API. """ - dropped_example_reasons: Optional[list[str]] - """Output only. For each index in `truncated_example_indices`, the user-facing reason why the example was dropped.""" + aggregation_results: Optional[list[AggregationResultDict]] + """One AggregationResult per metric.""" - total_billable_character_count: Optional[int] - """Output only. Number of billable characters in the tuning dataset.""" + dataset: Optional[EvaluationDatasetDict] + """The dataset used for evaluation & aggregation.""" - total_billable_token_count: Optional[int] - """Output only. 
Number of billable tokens in the tuning dataset.""" - total_truncated_example_count: Optional[int] - """Output only. The number of examples in the dataset that have been dropped. An example can be dropped for reasons including: too many tokens, contains an invalid image, contains too many images, etc.""" +AggregationOutputOrDict = Union[AggregationOutput, AggregationOutputDict] - total_tuning_character_count: Optional[int] - """Output only. Number of tuning characters in the tuning dataset.""" - truncated_example_indices: Optional[list[int]] - """Output only. A partial sample of the indices (starting from 1) of the dropped examples.""" +class OutputInfo(_common.BaseModel): + """Describes the info for output of EvaluationService.EvaluateDataset. - tuning_dataset_example_count: Optional[int] - """Output only. Number of examples in the tuning dataset.""" + This data type is not supported in Gemini API. + """ - tuning_step_count: Optional[int] - """Output only. Number of tuning steps for this Tuning Job.""" + gcs_output_directory: Optional[str] = Field( + default=None, + description="""Output only. The full path of the Cloud Storage directory created, into which the evaluation results and aggregation results are written.""", + ) - user_dataset_examples: Optional[list[ContentDict]] - """Output only. Sample user messages in the training dataset uri.""" - user_input_token_distribution: Optional[ - SupervisedTuningDatasetDistributionDict - ] - """Output only. Dataset distributions for the user input tokens.""" +class OutputInfoDict(TypedDict, total=False): + """Describes the info for output of EvaluationService.EvaluateDataset. - user_message_per_example_distribution: Optional[ - SupervisedTuningDatasetDistributionDict - ] - """Output only. Dataset distributions for the messages per example.""" + This data type is not supported in Gemini API. + """ - user_output_token_distribution: Optional[ - SupervisedTuningDatasetDistributionDict - ] - """Output only. Dataset distributions for the user output tokens.""" + gcs_output_directory: Optional[str] + """Output only. The full path of the Cloud Storage directory created, into which the evaluation results and aggregation results are written.""" -SupervisedTuningDataStatsOrDict = Union[ - SupervisedTuningDataStats, SupervisedTuningDataStatsDict -] +OutputInfoOrDict = Union[OutputInfo, OutputInfoDict] -class TuningDataStats(_common.BaseModel): - """The tuning data statistic values for TuningJob. +class EvaluateDatasetResponse(_common.BaseModel): + """Response in LRO for EvaluationService.EvaluateDataset. This data type is not supported in Gemini API. """ - distillation_data_stats: Optional[DistillationDataStats] = Field( - default=None, description="""Output only. Statistics for distillation.""" - ) - preference_optimization_data_stats: Optional[ - PreferenceOptimizationDataStats - ] = Field( + aggregation_output: Optional[AggregationOutput] = Field( default=None, - description="""Output only. Statistics for preference optimization.""", + description="""Output only. Aggregation statistics derived from results of EvaluationService.EvaluateDataset.""", ) - supervised_tuning_data_stats: Optional[SupervisedTuningDataStats] = Field( - default=None, description="""The SFT Tuning data stats.""" + output_info: Optional[OutputInfo] = Field( + default=None, + description="""Output only. Output info for EvaluationService.EvaluateDataset.""", ) -class TuningDataStatsDict(TypedDict, total=False): - """The tuning data statistic values for TuningJob. 
+class EvaluateDatasetResponseDict(TypedDict, total=False): + """Response in LRO for EvaluationService.EvaluateDataset. This data type is not supported in Gemini API. """ - distillation_data_stats: Optional[DistillationDataStatsDict] - """Output only. Statistics for distillation.""" - - preference_optimization_data_stats: Optional[ - PreferenceOptimizationDataStatsDict - ] - """Output only. Statistics for preference optimization.""" + aggregation_output: Optional[AggregationOutputDict] + """Output only. Aggregation statistics derived from results of EvaluationService.EvaluateDataset.""" - supervised_tuning_data_stats: Optional[SupervisedTuningDataStatsDict] - """The SFT Tuning data stats.""" + output_info: Optional[OutputInfoDict] + """Output only. Output info for EvaluationService.EvaluateDataset.""" -TuningDataStatsOrDict = Union[TuningDataStats, TuningDataStatsDict] +EvaluateDatasetResponseOrDict = Union[ + EvaluateDatasetResponse, EvaluateDatasetResponseDict +] -class EncryptionSpec(_common.BaseModel): - """Represents a customer-managed encryption key spec that can be applied to a top-level resource. +class EvaluateDatasetRun(_common.BaseModel): + """Evaluate Dataset Run Result for Tuning Job. This data type is not supported in Gemini API. """ - kms_key_name: Optional[str] = Field( + checkpoint_id: Optional[str] = Field( default=None, - description="""Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""", + description="""Output only. The checkpoint id used in the evaluation run. Only populated when evaluating checkpoints.""", + ) + error: Optional[GoogleRpcStatus] = Field( + default=None, + description="""Output only. The error of the evaluation run if any.""", + ) + evaluate_dataset_response: Optional[EvaluateDatasetResponse] = Field( + default=None, + description="""Output only. Results for EvaluationService.EvaluateDataset.""", + ) + operation_name: Optional[str] = Field( + default=None, + description="""Output only. The operation ID of the evaluation run. Format: `projects/{project}/locations/{location}/operations/{operation_id}`.""", ) -class EncryptionSpecDict(TypedDict, total=False): - """Represents a customer-managed encryption key spec that can be applied to a top-level resource. +class EvaluateDatasetRunDict(TypedDict, total=False): + """Evaluate Dataset Run Result for Tuning Job. This data type is not supported in Gemini API. """ - kms_key_name: Optional[str] - """Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.""" + checkpoint_id: Optional[str] + """Output only. The checkpoint id used in the evaluation run. Only populated when evaluating checkpoints.""" + + error: Optional[GoogleRpcStatusDict] + """Output only. The error of the evaluation run if any.""" + evaluate_dataset_response: Optional[EvaluateDatasetResponseDict] + """Output only. Results for EvaluationService.EvaluateDataset.""" -EncryptionSpecOrDict = Union[EncryptionSpec, EncryptionSpecDict] + operation_name: Optional[str] + """Output only. The operation ID of the evaluation run. 
Format: `projects/{project}/locations/{location}/operations/{operation_id}`.""" -class PartnerModelTuningSpec(_common.BaseModel): - """Tuning spec for Partner models. +EvaluateDatasetRunOrDict = Union[EvaluateDatasetRun, EvaluateDatasetRunDict] + + +class FullFineTuningSpec(_common.BaseModel): + """Tuning Spec for Full Fine Tuning. This data type is not supported in Gemini API. """ - hyper_parameters: Optional[dict[str, Any]] = Field( + hyper_parameters: Optional[SupervisedHyperParameters] = Field( default=None, - description="""Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""", + description="""Optional. Hyperparameters for Full Fine Tuning.""", ) training_dataset_uri: Optional[str] = Field( default=None, - description="""Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""", + description="""Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""", ) validation_dataset_uri: Optional[str] = Field( default=None, - description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""", + description="""Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""", ) -class PartnerModelTuningSpecDict(TypedDict, total=False): - """Tuning spec for Partner models. +class FullFineTuningSpecDict(TypedDict, total=False): + """Tuning Spec for Full Fine Tuning. This data type is not supported in Gemini API. """ - hyper_parameters: Optional[dict[str, Any]] - """Hyperparameters for tuning. The accepted hyper_parameters and their valid range of values will differ depending on the base model.""" + hyper_parameters: Optional[SupervisedHyperParametersDict] + """Optional. Hyperparameters for Full Fine Tuning.""" training_dataset_uri: Optional[str] - """Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""" + """Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""" validation_dataset_uri: Optional[str] - """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""" + """Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""" -PartnerModelTuningSpecOrDict = Union[ - PartnerModelTuningSpec, PartnerModelTuningSpecDict -] +FullFineTuningSpecOrDict = Union[FullFineTuningSpec, FullFineTuningSpecDict] class VeoHyperParameters(_common.BaseModel): @@ -11621,10 +12534,17 @@ class TuningJob(_common.BaseModel): default=None, description="""Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. 
This feature is only available for open source models.""", ) + evaluate_dataset_runs: Optional[list[EvaluateDatasetRun]] = Field( + default=None, + description="""Output only. Evaluation runs for the Tuning Job.""", + ) experiment: Optional[str] = Field( default=None, description="""Output only. The Experiment associated with this TuningJob.""", ) + full_fine_tuning_spec: Optional[FullFineTuningSpec] = Field( + default=None, description="""Tuning Spec for Full Fine Tuning.""" + ) labels: Optional[dict[str, str]] = Field( default=None, description="""Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""", @@ -11645,6 +12565,10 @@ class TuningJob(_common.BaseModel): default=None, description="""Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. For continuous tuning, tuned_model_display_name will by default use the same display name as the pre-tuned model. If a new display name is provided, the tuning job will create a new model instead of a new version.""", ) + tuning_job_state: Optional[TuningJobState] = Field( + default=None, + description="""Output only. The detail state of the tuning job (while the overall `JobState` is running).""", + ) veo_tuning_spec: Optional[VeoTuningSpec] = Field( default=None, description="""Tuning Spec for Veo Tuning.""" ) @@ -11720,9 +12644,15 @@ class TuningJobDict(TypedDict, total=False): custom_base_model: Optional[str] """Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models.""" + evaluate_dataset_runs: Optional[list[EvaluateDatasetRunDict]] + """Output only. Evaluation runs for the Tuning Job.""" + experiment: Optional[str] """Output only. The Experiment associated with this TuningJob.""" + full_fine_tuning_spec: Optional[FullFineTuningSpecDict] + """Tuning Spec for Full Fine Tuning.""" + labels: Optional[dict[str, str]] """Optional. The labels with user-defined metadata to organize TuningJob and generated resources such as Model and Endpoint. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.""" @@ -11738,6 +12668,9 @@ class TuningJobDict(TypedDict, total=False): tuned_model_display_name: Optional[str] """Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters. For continuous tuning, tuned_model_display_name will by default use the same display name as the pre-tuned model. If a new display name is provided, the tuning job will create a new model instead of a new version.""" + tuning_job_state: Optional[TuningJobState] + """Output only. 
The detail state of the tuning job (while the overall `JobState` is running).""" + veo_tuning_spec: Optional[VeoTuningSpecDict] """Tuning Spec for Veo Tuning.""" @@ -13309,7 +14242,10 @@ class ListFileSearchStoresResponse(_common.BaseModel): sdk_http_response: Optional[HttpResponse] = Field( default=None, description="""Used to retain the full HTTP response.""" ) - next_page_token: Optional[str] = Field(default=None, description="""""") + next_page_token: Optional[str] = Field( + default=None, + description="""A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.""", + ) file_search_stores: Optional[list[FileSearchStore]] = Field( default=None, description="""The returned file search stores.""" ) @@ -13322,7 +14258,7 @@ class ListFileSearchStoresResponseDict(TypedDict, total=False): """Used to retain the full HTTP response.""" next_page_token: Optional[str] - """""" + """A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no more pages.""" file_search_stores: Optional[list[FileSearchStoreDict]] """The returned file search stores.""" @@ -14073,6 +15009,10 @@ class InlinedEmbedContentResponse(_common.BaseModel): description="""The error encountered while processing the request. """, ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="""Output only. The metadata associated with the request.""", + ) class InlinedEmbedContentResponseDict(TypedDict, total=False): @@ -14086,6 +15026,9 @@ class InlinedEmbedContentResponseDict(TypedDict, total=False): """The error encountered while processing the request. """ + metadata: Optional[dict[str, Any]] + """Output only. The metadata associated with the request.""" + InlinedEmbedContentResponseOrDict = Union[ InlinedEmbedContentResponse, InlinedEmbedContentResponseDict