
Commit 39607ef

Merge branch 'main' into fix/safe-json-serialize-exception-handling
2 parents 50540aa + 684a6e7 commit 39607ef

5 files changed

Lines changed: 93 additions & 6 deletions


pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -102,6 +102,7 @@ optional-dependencies.docs = [
   "myst-parser",
   "sphinx<9",
   "sphinx-autodoc-typehints",
+  "sphinx-click",
   "sphinx-rtd-theme",
 ]
 optional-dependencies.eval = [
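
Note: the only change here is the new "sphinx-click" entry in the docs extra. As a hedged sketch of how such a dependency is typically wired up (this is not the project's actual conf.py, which is not part of this diff), sphinx-click is loaded as a Sphinx extension so click-based CLI commands can be rendered into the docs:

# Hypothetical docs/conf.py fragment, for illustration only. Adding the
# dependency above only makes the package installable for the docs build;
# a conf.py still has to enable the extension.
extensions = [
    "myst_parser",
    "sphinx_autodoc_typehints",
    "sphinx_click",  # renders click-based CLI commands into the docs
]
html_theme = "sphinx_rtd_theme"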

src/google/adk/integrations/agent_identity/gcp_auth_provider.py

Lines changed: 1 addition & 4 deletions
@@ -200,10 +200,7 @@ def _is_consent_completed(context: CallbackContext) -> bool:
   for call_id, _ in euc_responses.items():
     if call_id in euc_calls:
       call = euc_calls[call_id]
-      if (
-          call.args
-          and call.args.get("function_call_id") == target_tool_call_id
-      ):
+      if call.args and call.args.get("functionCallId") == target_tool_call_id:
         return True
   return False
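
The behavioral change in this hunk is the lookup key: the auth request's args are produced by dumping AuthToolArguments with by_alias=True (see the updated test below), so the resulting dict carries the camelCase alias "functionCallId" rather than the snake_case field name. A minimal sketch of that serialization behavior, using a stand-in model rather than the real AuthToolArguments:

# Stand-in model, for illustration only. It assumes the real AuthToolArguments
# is a pydantic v2 model whose function_call_id field carries a camelCase alias.
from pydantic import BaseModel, ConfigDict, Field


class FakeAuthToolArguments(BaseModel):
  model_config = ConfigDict(populate_by_name=True)

  function_call_id: str = Field(alias="functionCallId")


args = FakeAuthToolArguments(function_call_id="call-123").model_dump(
    by_alias=True, exclude_none=True
)
print(args)                                        # {'functionCallId': 'call-123'}
assert args.get("functionCallId") == "call-123"    # key the fixed code reads
assert args.get("function_call_id") is None        # key the old code read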

src/google/adk/optimization/local_eval_sampler.py

Lines changed: 5 additions & 1 deletion
@@ -289,7 +289,11 @@ def _extract_eval_data(
     for eval_metric_result in per_invocation_result.eval_metric_results:
       eval_metric_results.append({
           "metric_name": eval_metric_result.metric_name,
-          "score": round(eval_metric_result.score, 2),  # accurate enough
+          "score": (
+              round(eval_metric_result.score, 2)
+              if eval_metric_result.score is not None
+              else None
+          ),  # accurate enough
           "eval_status": eval_metric_result.eval_status.name,
       })
     per_invocation_result_dict = {
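
The guard matters because round() does not accept None, so a metric that was never evaluated would previously crash extraction instead of passing its missing score through. A minimal sketch of the behavior the new conditional gives (the helper name here is made up for illustration and is not part of local_eval_sampler):

# Hypothetical helper mirroring the conditional expression added above.
def _round_score(score: float | None, ndigits: int = 2) -> float | None:
  return round(score, ndigits) if score is not None else None


print(_round_score(0.8765))  # 0.88
print(_round_score(None))    # None
# Without the guard: round(None, 2) raises
#   TypeError: type NoneType doesn't define __round__ method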

tests/unittests/integrations/agent_identity/test_gcp_auth_provider.py

Lines changed: 44 additions & 1 deletion
@@ -25,6 +25,7 @@
 from google.adk.agents.callback_context import CallbackContext
 from google.adk.auth.auth_credential import AuthCredentialTypes
 from google.adk.auth.auth_tool import AuthConfig
+from google.adk.auth.auth_tool import AuthToolArguments
 from google.adk.flows.llm_flows.functions import REQUEST_EUC_FUNCTION_CALL_NAME
 from google.adk.integrations.agent_identity import gcp_auth_provider
 from google.adk.integrations.agent_identity import GcpAuthProvider
@@ -399,7 +400,9 @@ async def test_get_auth_credential_returns_token_if_consent_was_completed(
   function_call = Mock()
   function_call.id = "auth-req-1"
   function_call.name = REQUEST_EUC_FUNCTION_CALL_NAME
-  function_call.args = {"function_call_id": "call-123"}
+  function_call.args = AuthToolArguments(
+      function_call_id="call-123", auth_config=auth_config
+  ).model_dump(by_alias=True, exclude_none=True)

   event1 = Mock()
   event1.get_function_calls.return_value = [function_call]
@@ -432,3 +435,43 @@ async def test_get_auth_credential_returns_token_if_consent_was_completed(
   assert auth_credential.auth_type == AuthCredentialTypes.HTTP
   assert auth_credential.http.scheme == "bearer"
   assert auth_credential.http.credentials.token == "test-token"
+
+
+async def test_get_auth_credential_raises_error_if_consent_canceled(
+    mock_operation, auth_config, context, provider
+):
+  function_call = Mock()
+  function_call.id = "auth-req-1"
+  function_call.name = REQUEST_EUC_FUNCTION_CALL_NAME
+  function_call.args = AuthToolArguments(
+      function_call_id="call-123", auth_config=auth_config
+  ).model_dump(by_alias=True, exclude_none=True)
+
+  event1 = Mock()
+  event1.get_function_calls.return_value = [function_call]
+  event1.get_function_responses.return_value = []
+
+  function_response = Mock()
+  function_response.id = "auth-req-1"
+  function_response.name = REQUEST_EUC_FUNCTION_CALL_NAME
+
+  event2 = Mock()
+  event2.get_function_calls.return_value = []
+  event2.get_function_responses.return_value = [function_response]
+
+  context.session.events = [event1, event2]
+  context.function_call_id = "call-123"
+
+  meta = RetrieveCredentialsMetadata({
+      "uri_consent_required": {
+          "authorization_uri": "https://example.com/auth",
+          "consent_nonce": "sample-nonce",
+      }
+  })
+  mock_operation.metadata.value = RetrieveCredentialsMetadata.serialize(meta)
+  mock_operation.done = False
+
+  with pytest.raises(
+      RuntimeError, match="Failed to retrieve consent based credential."
+  ):
+    await provider.get_auth_credential(auth_config, context)
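
The new test drives the cancel path by leaving mock_operation.done = False and asserting on the resulting error. One detail worth noting about the assertion style: pytest.raises(..., match=...) treats match as a regular expression and applies re.search to the string form of the exception, so a substring of the message is enough. A small self-contained sketch of that pattern (the raising function is a stand-in, not provider.get_auth_credential):

# Illustration of the pytest.raises(match=...) idiom used above.
import pytest


def _cancelled_consent():
  raise RuntimeError("Failed to retrieve consent based credential.")


def test_match_uses_regex_search():
  # match is re.search'ed against str(exc), so a partial match passes.
  with pytest.raises(RuntimeError, match="consent based credential"):
    _cancelled_consent()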

tests/unittests/optimization/local_eval_sampler_test.py

Lines changed: 42 additions & 0 deletions
@@ -338,6 +338,48 @@ async def test_extract_eval_data(mocker):
 ]


+def test_extract_eval_data_preserves_none_metric_score(mocker):
+  mock_eval_sets_manager = mocker.MagicMock(spec=EvalSetsManager)
+  mock_eval_case = mocker.MagicMock()
+  mock_eval_case.conversation_scenario = "test_scenario"
+  mock_eval_sets_manager.get_eval_case.return_value = mock_eval_case
+
+  mock_metric_result = mocker.MagicMock(spec=EvalMetricResult)
+  mock_metric_result.metric_name = "test_metric"
+  mock_metric_result.score = None
+  mock_metric_result.eval_status = EvalStatus.NOT_EVALUATED
+
+  mock_per_inv_result = mocker.MagicMock(spec=EvalMetricResultPerInvocation)
+  mock_per_inv_result.actual_invocation = mocker.MagicMock(spec=Invocation)
+  mock_per_inv_result.expected_invocation = mocker.MagicMock(spec=Invocation)
+  mock_per_inv_result.eval_metric_results = [mock_metric_result]
+
+  mock_eval_result = mocker.MagicMock(spec=EvalCaseResult)
+  mock_eval_result.eval_id = "t1"
+  mock_eval_result.eval_metric_result_per_invocation = [mock_per_inv_result]
+
+  mocker.patch(
+      "google.adk.optimization.local_eval_sampler.extract_single_invocation_info",
+      side_effect=[{"info": "actual"}, {"info": "expected"}],
+  )
+
+  config = LocalEvalSamplerConfig(
+      eval_config=EvalConfig(),
+      app_name="test_app",
+      train_eval_set="train_set",
+      train_eval_case_ids=["t1"],
+  )
+  interface = LocalEvalSampler(config, mock_eval_sets_manager)
+
+  eval_data = interface._extract_eval_data("train_set", [mock_eval_result])
+
+  assert eval_data["t1"]["invocations"][0]["eval_metric_results"] == [{
+      "metric_name": "test_metric",
+      "score": None,
+      "eval_status": "NOT_EVALUATED",
+  }]
+
+
 @pytest.mark.asyncio
 async def test_sample_and_score(mocker):
   # Mock results
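
This test builds its fixtures with mocker.MagicMock(spec=...), which constrains each mock to the attribute surface of the spec'd class so misspelled attributes fail instead of silently returning fresh mocks. A small sketch of that effect with a stand-in class (the real EvalMetricResult lives in google.adk and is not redefined here):

# Illustration only: EvalMetricResultStub stands in for the real class.
from unittest.mock import MagicMock


class EvalMetricResultStub:
  metric_name = ""
  score = None
  eval_status = None


m = MagicMock(spec=EvalMetricResultStub)
m.score = None          # setting a known attribute works as usual
print(m.metric_name)    # attributes on the spec come back as child mocks
try:
  m.scroe               # typo: not on the spec, so attribute access raises
except AttributeError as err:
  print(err)            # e.g. "Mock object has no attribute 'scroe'"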
