Skip to content

Commit e10a710

Browse files
author
omkute10
committed
Add debug logging to VertexAiSearchTool
1 parent 5d9a7e7 commit e10a710

File tree

2 files changed

+163
-14
lines changed

2 files changed

+163
-14
lines changed

src/google/adk/tools/vertex_ai_search_tool.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
from __future__ import annotations
1616

17+
import logging
1718
from typing import Optional
1819
from typing import TYPE_CHECKING
1920

@@ -25,6 +26,8 @@
2526
from .base_tool import BaseTool
2627
from .tool_context import ToolContext
2728

29+
_logger = logging.getLogger(__name__)
30+
2831
if TYPE_CHECKING:
2932
from ..models import LlmRequest
3033

@@ -37,6 +40,26 @@ class VertexAiSearchTool(BaseTool):
3740
search_engine_id: The Vertex AI search engine resource ID.
3841
"""
3942

43+
@staticmethod
44+
def _extract_resource_id(resource_path: str, resource_type: str) -> str:
45+
"""Extracts the resource ID from a full resource path.
46+
47+
Args:
48+
resource_path: The full resource path (e.g., "projects/p/locations/l/collections/c/engines/e")
49+
resource_type: The type of resource to extract (e.g., 'engines', 'dataStores')
50+
51+
Returns:
52+
The extracted resource ID
53+
"""
54+
parts = resource_path.split('/')
55+
try:
56+
resource_index = parts.index(resource_type)
57+
if resource_index + 1 < len(parts):
58+
return parts[resource_index + 1]
59+
except ValueError:
60+
pass
61+
return resource_path # Return original if pattern not matched
62+
4063
def __init__(
4164
self,
4265
*,
@@ -83,6 +106,11 @@ def __init__(
83106
self.data_store_id = data_store_id
84107
self.data_store_specs = data_store_specs
85108
self.search_engine_id = search_engine_id
109+
self._search_engine_name = (
110+
self._extract_resource_id(search_engine_id, 'engines')
111+
if search_engine_id
112+
else None
113+
)
86114
self.filter = filter
87115
self.max_results = max_results
88116
self.bypass_multi_tools_limit = bypass_multi_tools_limit
@@ -102,6 +130,15 @@ async def process_llm_request(
102130
)
103131
llm_request.config = llm_request.config or types.GenerateContentConfig()
104132
llm_request.config.tools = llm_request.config.tools or []
133+
_logger.debug(
134+
'Adding Vertex AI Search tool config to LLM request: datastore=%s,'
135+
' engine=%s, filter=%s, max_results=%s, data_store_specs=%s',
136+
self.data_store_id,
137+
self._search_engine_name or self.search_engine_id,
138+
self.filter,
139+
self.max_results,
140+
self.data_store_specs,
141+
)
105142
llm_request.config.tools.append(
106143
types.Tool(
107144
retrieval=types.Retrieval(

tests/unittests/tools/test_vertex_ai_search_tool.py

Lines changed: 126 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
import logging
16+
1517
from google.adk.agents.invocation_context import InvocationContext
1618
from google.adk.agents.sequential_agent import SequentialAgent
1719
from google.adk.models.llm_request import LlmRequest
@@ -24,6 +26,8 @@
2426
from google.genai import types
2527
import pytest
2628

29+
VERTEX_SEARCH_TOOL_LOGGER_NAME = 'google.adk.tools.vertex_ai_search_tool'
30+
2731

2832
async def _create_tool_context() -> ToolContext:
2933
session_service = InMemorySessionService()
@@ -121,12 +125,32 @@ def test_init_with_data_store_id(self):
121125
tool = VertexAiSearchTool(data_store_id='test_data_store')
122126
assert tool.data_store_id == 'test_data_store'
123127
assert tool.search_engine_id is None
128+
assert tool.data_store_specs is None
124129

125130
def test_init_with_search_engine_id(self):
126131
"""Test initialization with search engine ID."""
127132
tool = VertexAiSearchTool(search_engine_id='test_search_engine')
128133
assert tool.search_engine_id == 'test_search_engine'
129134
assert tool.data_store_id is None
135+
assert tool.data_store_specs is None
136+
137+
def test_init_with_engine_and_specs(self):
  """Test initialization with search engine ID and specs."""
  engine_path = 'projects/p/locations/l/collections/default_collection/engines/test_search_engine'
  store_specs = [
      types.VertexAISearchDataStoreSpec(
          dataStore='projects/p/locations/l/collections/default_collection/dataStores/spec_store_id'
      )
  ]

  tool = VertexAiSearchTool(
      search_engine_id=engine_path,
      data_store_specs=store_specs,
  )

  # Engine path and specs are stored verbatim; the mutually-exclusive
  # data_store_id stays unset.
  assert tool.search_engine_id == engine_path
  assert tool.data_store_id is None
  assert tool.data_store_specs == store_specs
130154

131155
def test_init_with_neither_raises_error(self):
132156
"""Test that initialization without either ID raises ValueError."""
@@ -146,10 +170,27 @@ def test_init_with_both_raises_error(self):
146170
data_store_id='test_data_store', search_engine_id='test_search_engine'
147171
)
148172

173+
def test_init_with_specs_but_no_engine_raises_error(self):
  """Test that specs without engine ID raises ValueError."""
  store_specs = [
      types.VertexAISearchDataStoreSpec(
          dataStore='projects/p/locations/l/collections/default_collection/dataStores/spec_store_id'
      )
  ]

  # Specs alone are not enough: the constructor still requires one of the
  # two primary identifiers.
  with pytest.raises(
      ValueError,
      match='Either data_store_id or search_engine_id must be specified',
  ):
    VertexAiSearchTool(data_store_specs=store_specs)
185+
149186
@pytest.mark.asyncio
150-
async def test_process_llm_request_with_simple_gemini_model(self):
187+
async def test_process_llm_request_with_simple_gemini_model(self, caplog):
151188
"""Test processing LLM request with simple Gemini model name."""
152-
tool = VertexAiSearchTool(data_store_id='test_data_store')
189+
caplog.set_level(logging.DEBUG, logger=VERTEX_SEARCH_TOOL_LOGGER_NAME)
190+
191+
tool = VertexAiSearchTool(
192+
data_store_id='test_data_store', filter='f', max_results=5
193+
)
153194
tool_context = await _create_tool_context()
154195

155196
llm_request = LlmRequest(
@@ -162,17 +203,49 @@ async def test_process_llm_request_with_simple_gemini_model(self):
162203

163204
assert llm_request.config.tools is not None
164205
assert len(llm_request.config.tools) == 1
165-
assert llm_request.config.tools[0].retrieval is not None
166-
assert llm_request.config.tools[0].retrieval.vertex_ai_search is not None
206+
retrieval_tool = llm_request.config.tools[0]
207+
assert retrieval_tool.retrieval is not None
208+
assert retrieval_tool.retrieval.vertex_ai_search is not None
209+
assert (
210+
retrieval_tool.retrieval.vertex_ai_search.datastore == 'test_data_store'
211+
)
212+
assert retrieval_tool.retrieval.vertex_ai_search.engine is None
213+
assert retrieval_tool.retrieval.vertex_ai_search.filter == 'f'
214+
assert retrieval_tool.retrieval.vertex_ai_search.max_results == 5
215+
assert retrieval_tool.retrieval.vertex_ai_search.data_store_specs is None
216+
217+
# Check for debug log message and its components
218+
debug_messages = [
219+
r.message for r in caplog.records if r.levelno == logging.DEBUG
220+
]
221+
debug_message = '\n'.join(debug_messages)
222+
assert 'Adding Vertex AI Search tool config to LLM request' in debug_message
223+
assert 'datastore=test_data_store' in debug_message
224+
assert 'engine=None' in debug_message
225+
assert 'filter=f' in debug_message
226+
assert 'max_results=5' in debug_message
227+
assert 'data_store_specs=None' in debug_message
167228

168229
@pytest.mark.asyncio
169-
async def test_process_llm_request_with_path_based_gemini_model(self):
230+
async def test_process_llm_request_with_path_based_gemini_model(self, caplog):
170231
"""Test processing LLM request with path-based Gemini model name."""
171-
tool = VertexAiSearchTool(data_store_id='test_data_store')
232+
caplog.set_level(logging.DEBUG, logger=VERTEX_SEARCH_TOOL_LOGGER_NAME)
233+
234+
specs = [
235+
types.VertexAISearchDataStoreSpec(
236+
dataStore='projects/p/locations/l/collections/default_collection/dataStores/spec_store_id'
237+
)
238+
]
239+
tool = VertexAiSearchTool(
240+
search_engine_id='projects/p/locations/l/collections/default_collection/engines/test_engine',
241+
data_store_specs=specs,
242+
filter='f2',
243+
max_results=10,
244+
)
172245
tool_context = await _create_tool_context()
173246

174247
llm_request = LlmRequest(
175-
model='projects/265104255505/locations/us-central1/publishers/google/models/gemini-2.0-flash-001',
248+
model='projects/p/locations/l/publishers/g/models/gemini-2.0-flash-001',
176249
config=types.GenerateContentConfig(),
177250
)
178251

@@ -182,8 +255,30 @@ async def test_process_llm_request_with_path_based_gemini_model(self):
182255

183256
assert llm_request.config.tools is not None
184257
assert len(llm_request.config.tools) == 1
185-
assert llm_request.config.tools[0].retrieval is not None
186-
assert llm_request.config.tools[0].retrieval.vertex_ai_search is not None
258+
retrieval_tool = llm_request.config.tools[0]
259+
assert retrieval_tool.retrieval is not None
260+
assert retrieval_tool.retrieval.vertex_ai_search is not None
261+
assert retrieval_tool.retrieval.vertex_ai_search.datastore is None
262+
assert (
263+
retrieval_tool.retrieval.vertex_ai_search.engine
264+
== 'projects/p/locations/l/collections/default_collection/engines/test_engine'
265+
)
266+
assert retrieval_tool.retrieval.vertex_ai_search.filter == 'f2'
267+
assert retrieval_tool.retrieval.vertex_ai_search.max_results == 10
268+
assert retrieval_tool.retrieval.vertex_ai_search.data_store_specs == specs
269+
270+
# Check for debug log message and its components
271+
debug_messages = [
272+
r.message for r in caplog.records if r.levelno == logging.DEBUG
273+
]
274+
debug_message = '\n'.join(debug_messages)
275+
assert 'Adding Vertex AI Search tool config to LLM request' in debug_message
276+
assert 'datastore=None' in debug_message
277+
assert 'engine=test_engine' in debug_message
278+
assert 'filter=f2' in debug_message
279+
assert 'max_results=10' in debug_message
280+
assert 'data_store_specs=' in debug_message
281+
assert 'spec_store_id' in debug_message
187282

188283
@pytest.mark.asyncio
189284
async def test_process_llm_request_with_gemini_1_and_other_tools_raises_error(
@@ -230,7 +325,9 @@ async def test_process_llm_request_with_path_based_gemini_1_and_other_tools_rais
230325
)
231326

232327
llm_request = LlmRequest(
233-
model='projects/265104255505/locations/us-central1/publishers/google/models/gemini-1.5-pro-preview',
328+
model=(
329+
'projects/p/locations/l/publishers/g/models/gemini-1.5-pro-preview'
330+
),
234331
config=types.GenerateContentConfig(tools=[existing_tool]),
235332
)
236333

@@ -273,7 +370,9 @@ async def test_process_llm_request_with_path_based_non_gemini_model_raises_error
273370
tool = VertexAiSearchTool(data_store_id='test_data_store')
274371
tool_context = await _create_tool_context()
275372

276-
non_gemini_path = 'projects/265104255505/locations/us-central1/publishers/google/models/claude-3-sonnet'
373+
non_gemini_path = (
374+
'projects/p/locations/l/publishers/g/models/claude-3-sonnet'
375+
)
277376
llm_request = LlmRequest(
278377
model=non_gemini_path, config=types.GenerateContentConfig()
279378
)
@@ -291,9 +390,11 @@ async def test_process_llm_request_with_path_based_non_gemini_model_raises_error
291390

292391
@pytest.mark.asyncio
293392
async def test_process_llm_request_with_gemini_2_and_other_tools_succeeds(
294-
self,
393+
self, caplog
295394
):
296395
"""Test that Gemini 2.x with other tools succeeds."""
396+
caplog.set_level(logging.DEBUG, logger=VERTEX_SEARCH_TOOL_LOGGER_NAME)
397+
297398
tool = VertexAiSearchTool(data_store_id='test_data_store')
298399
tool_context = await _create_tool_context()
299400

@@ -316,5 +417,16 @@ async def test_process_llm_request_with_gemini_2_and_other_tools_succeeds(
316417
assert llm_request.config.tools is not None
317418
assert len(llm_request.config.tools) == 2
318419
assert llm_request.config.tools[0] == existing_tool
319-
assert llm_request.config.tools[1].retrieval is not None
320-
assert llm_request.config.tools[1].retrieval.vertex_ai_search is not None
420+
retrieval_tool = llm_request.config.tools[1]
421+
assert retrieval_tool.retrieval is not None
422+
assert retrieval_tool.retrieval.vertex_ai_search is not None
423+
assert (
424+
retrieval_tool.retrieval.vertex_ai_search.datastore == 'test_data_store'
425+
)
426+
427+
assert 'Adding Vertex AI Search tool config to LLM request' in caplog.text
428+
assert 'datastore=test_data_store' in caplog.text
429+
assert 'engine=None' in caplog.text
430+
assert 'filter=None' in caplog.text
431+
assert 'max_results=None' in caplog.text
432+
assert 'data_store_specs=None' in caplog.text

0 commit comments

Comments
 (0)