Skip to content

Commit 2024ead

Browse files
committed
fix: Resolve HIGH priority grounding_metadata state management issues
- Add interrupted parameter to __build_full_text_response to preserve interrupted signal when flushing pending text - Pass interrupted flag in turn_complete and interrupted blocks - Remove premature reset of last_grounding_metadata after interrupted (not a terminal event) - Add documentation for tool_call metadata persistence design decision Addresses review comments: - HIGH: Lost interrupted signal in full text response - HIGH: Premature reset after interrupted - MEDIUM: Duplicate reset logic (simplified by removing premature reset)
1 parent a8e16ab commit 2024ead

1 file changed

Lines changed: 11 additions & 3 deletions

File tree

src/google/adk/models/gemini_llm_connection.py

Lines changed: 11 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -143,6 +143,7 @@ def __build_full_text_response(
143143
self,
144144
text: str,
145145
grounding_metadata: types.GroundingMetadata | None = None,
146+
interrupted: bool = False,
146147
):
147148
"""Builds a full text response.
148149
@@ -152,6 +153,7 @@ def __build_full_text_response(
152153
Args:
153154
text: The text to be included in the response.
154155
grounding_metadata: Optional grounding metadata to include.
156+
interrupted: Whether this response was interrupted.
155157
156158
Returns:
157159
An LlmResponse containing the full text.
@@ -162,6 +164,7 @@ def __build_full_text_response(
162164
parts=[types.Part.from_text(text=text)],
163165
),
164166
grounding_metadata=grounding_metadata,
167+
interrupted=interrupted,
165168
)
166169

167170
async def receive(self) -> AsyncGenerator[LlmResponse, None]:
@@ -295,7 +298,9 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]:
295298
if message.server_content.turn_complete:
296299
if text:
297300
yield self.__build_full_text_response(
298-
text, last_grounding_metadata
301+
text,
302+
last_grounding_metadata,
303+
interrupted=message.server_content.interrupted,
299304
)
300305
text = ''
301306
yield LlmResponse(
@@ -312,15 +317,14 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]:
312317
if message.server_content.interrupted:
313318
if text:
314319
yield self.__build_full_text_response(
315-
text, last_grounding_metadata
320+
text, last_grounding_metadata, interrupted=True
316321
)
317322
text = ''
318323
else:
319324
yield LlmResponse(
320325
interrupted=message.server_content.interrupted,
321326
grounding_metadata=last_grounding_metadata,
322327
)
323-
last_grounding_metadata = None # Reset after yielding
324328
if message.tool_call:
325329
if text:
326330
yield self.__build_full_text_response(text, last_grounding_metadata)
@@ -333,6 +337,10 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]:
333337
content=types.Content(role='model', parts=parts),
334338
grounding_metadata=last_grounding_metadata,
335339
)
340+
# Note: last_grounding_metadata is NOT reset here because tool_call
341+
# is part of an ongoing turn. The metadata persists until turn_complete
342+
# or interrupted with break, ensuring subsequent messages in the same
343+
# turn can access the grounding information.
336344
if message.session_resumption_update:
337345
logger.debug('Received session resumption message: %s', message)
338346
yield (

0 commit comments

Comments (0)