Skip to content

Commit 531075c

Browse files
authored
Add Gemini Live API snippets (#851)
1 parent 16115b7 commit 531075c

3 files changed

Lines changed: 188 additions & 0 deletions

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,6 @@
11
*.iml
22
.gradle
3+
gradle/gradle-daemon-jvm.properties
34
/local.properties
45
.idea/
56
.DS_Store

misc/src/main/AndroidManifest.xml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -20,6 +20,7 @@
2020
<!--[START android_broadcast_receiver_10_manifest_permission]-->
2121
<uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
2222
<!--[END android_broadcast_receiver_10_manifest_permission]-->
23+
<uses-permission android:name="android.permission.RECORD_AUDIO" />
2324

2425
<application
2526
android:name=".MyApplication"
Lines changed: 186 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,186 @@
1+
/*
2+
* Copyright 2025 The Android Open Source Project
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* https://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
package com.example.snippets.ai
18+
19+
import android.Manifest
20+
import android.app.Application
21+
import android.util.Log
22+
import androidx.annotation.RequiresPermission
23+
import androidx.lifecycle.AndroidViewModel
24+
import androidx.lifecycle.viewModelScope
25+
import com.google.firebase.Firebase
26+
import com.google.firebase.ai.ai
27+
import com.google.firebase.ai.type.FunctionCallPart
28+
import com.google.firebase.ai.type.FunctionDeclaration
29+
import com.google.firebase.ai.type.FunctionResponsePart
30+
import com.google.firebase.ai.type.GenerativeBackend
31+
import com.google.firebase.ai.type.LiveSession
32+
import com.google.firebase.ai.type.PublicPreviewAPI
33+
import com.google.firebase.ai.type.ResponseModality
34+
import com.google.firebase.ai.type.Schema
35+
import com.google.firebase.ai.type.SpeechConfig
36+
import com.google.firebase.ai.type.Tool
37+
import com.google.firebase.ai.type.Voice
38+
import com.google.firebase.ai.type.content
39+
import com.google.firebase.ai.type.liveGenerationConfig
40+
import kotlinx.coroutines.launch
41+
import kotlinx.serialization.json.JsonObject
42+
import kotlinx.serialization.json.JsonPrimitive
43+
import kotlinx.serialization.json.jsonPrimitive
44+
45+
// See https://github.com/android/ai-samples/tree/main/samples/gemini-live-todo
// for a sample app with the Gemini Live API
@OptIn(PublicPreviewAPI::class)
class GeminiLiveViewModel(application: Application) : AndroidViewModel(application) {
    private val TAG = "GeminiLiveViewModel"

    // Held so that startWithHandler() can reuse the session opened in setupTool().
    private var liveSession: LiveSession? = null

    /**
     * Initializes a `LiveModel` configured for audio responses with the FENRIR voice.
     * The model is created but not connected; see [startConversation].
     */
    fun initializeLiveModel() {
        // [START android_ai_gemini_live_initialize]
        // Initialize the `LiveModel`
        val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
            modelName = "gemini-2.5-flash-native-audio-preview-12-2025",
            generationConfig = liveGenerationConfig {
                responseModality = ResponseModality.AUDIO
                speechConfig = SpeechConfig(voice = Voice("FENRIR"))
            }
        )
        // [END android_ai_gemini_live_initialize]
    }

    /** Same as [initializeLiveModel], but also attaches a system instruction. */
    fun initializeWithSystemInstruction() {
        // [START android_ai_gemini_live_system_instruction]
        val systemInstruction = content {
            text("You are a helpful assistant, your main role is [...]")
        }

        val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
            modelName = "gemini-2.5-flash-native-audio-preview-12-2025",
            generationConfig = liveGenerationConfig {
                responseModality = ResponseModality.AUDIO
                speechConfig = SpeechConfig(voice = Voice("FENRIR"))
            },
            systemInstruction = systemInstruction,
        )
        // [END android_ai_gemini_live_system_instruction]
    }

    /**
     * Connects to the model and starts a bidirectional audio conversation.
     * Requires the RECORD_AUDIO runtime permission to already be granted.
     */
    @RequiresPermission(Manifest.permission.RECORD_AUDIO)
    fun startConversation() {
        val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
            modelName = "gemini-2.5-flash-native-audio-preview-12-2025",
            generationConfig = liveGenerationConfig {
                responseModality = ResponseModality.AUDIO
                speechConfig = SpeechConfig(voice = Voice("FENRIR"))
            }
        )

        viewModelScope.launch {
            try {
                // [START android_ai_gemini_live_connect]
                val session = model.connect()
                session.startAudioConversation()
                // [END android_ai_gemini_live_connect]
            } catch (e: Exception) {
                Log.e(TAG, "Error connecting to the model", e)
            }
        }
    }

    // [START android_ai_gemini_live_function_declaration]
    val itemList = mutableListOf<String>()

    fun addList(item: String) {
        itemList.add(item)
    }

    val addListFunctionDeclaration = FunctionDeclaration(
        name = "addList",
        description = "Function adding an item to the list",
        parameters = mapOf(
            "item" to Schema.string("A short string describing the item to add to the list")
        )
    )
    // [END android_ai_gemini_live_function_declaration]

    /**
     * Configures the model with the `addList` tool and opens a session,
     * storing it in [liveSession] for later use by [startWithHandler].
     */
    fun setupTool() {
        val systemInstruction = content { text("You are a helpful assistant...") }

        // [START android_ai_gemini_live_tool_setup]
        val addListTool = Tool.functionDeclarations(listOf(addListFunctionDeclaration))

        val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel(
            modelName = "gemini-2.5-flash-native-audio-preview-12-2025",
            generationConfig = liveGenerationConfig {
                responseModality = ResponseModality.AUDIO
                speechConfig = SpeechConfig(voice = Voice("FENRIR"))
            },
            systemInstruction = systemInstruction,
            tools = listOf(addListTool)
        )
        // [END android_ai_gemini_live_tool_setup]

        viewModelScope.launch {
            try {
                liveSession = model.connect()
            } catch (e: Exception) {
                Log.e(TAG, "Error connecting to the model", e)
            }
        }
    }

    /**
     * Starts an audio conversation on the session opened by [setupTool],
     * routing the model's function calls through [functionCallHandler].
     * No-ops if no session has been opened yet.
     */
    @RequiresPermission(Manifest.permission.RECORD_AUDIO)
    fun startWithHandler() {
        viewModelScope.launch {
            liveSession?.let { session ->
                // [START android_ai_gemini_live_function_call_handler]
                session.startAudioConversation(::functionCallHandler)
                // [START_EXCLUDE]
            }
        }
    }

    // [END_EXCLUDE]
    /**
     * Dispatches a function call from the model and returns a JSON result.
     * Unknown function names — or a missing "item" argument — produce an
     * error response rather than throwing, so the conversation can continue.
     */
    fun functionCallHandler(functionCall: FunctionCallPart): FunctionResponsePart {
        return when (functionCall.name) {
            "addList" -> {
                // Extract function parameter from functionCallPart
                val itemName = functionCall.args["item"]?.jsonPrimitive?.content
                    ?: return FunctionResponsePart(
                        functionCall.name,
                        JsonObject(
                            mapOf("error" to JsonPrimitive("Missing required argument: item"))
                        )
                    )
                // Call function with parameter
                addList(itemName)
                // Confirm the function call to the model
                val response = JsonObject(
                    mapOf(
                        "success" to JsonPrimitive(true),
                        "message" to JsonPrimitive("Item $itemName added to the todo list")
                    )
                )
                FunctionResponsePart(functionCall.name, response)
            }
            else -> {
                val response = JsonObject(
                    mapOf(
                        "error" to JsonPrimitive("Unknown function: ${functionCall.name}")
                    )
                )
                FunctionResponsePart(functionCall.name, response)
            }
        }
    }
    // [END android_ai_gemini_live_function_call_handler]
}

0 commit comments

Comments
 (0)