Skip to content

Commit 5b5845d

Browse files
jsondai, copybara-github
authored and committed
feat: GenAI Client(evals) - Support N+1 Agent Engine inference via agent_data in run_inference()
PiperOrigin-RevId: 908461295
1 parent 68f053e commit 5b5845d

3 files changed

Lines changed: 523 additions & 88 deletions

File tree

Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,159 @@
1+
# Copyright 2025 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
#
15+
# pylint: disable=protected-access,bad-continuation,missing-function-docstring
16+
17+
import pytest
18+
19+
from tests.unit.vertexai.genai.replays import pytest_helper
20+
from vertexai._genai import types
21+
from google.genai import types as genai_types
22+
23+
pytest.importorskip(
24+
"google.adk", reason="google-adk not installed, skipping ADK agent tests"
25+
)
26+
from google.adk.agents import ( # noqa: E402
27+
LlmAgent,
28+
) # pylint: disable=g-import-not-at-top,g-bad-import-order
29+
30+
31+
def test_inference_with_eval_cases_multi_turn_agent_data(client):
    """Tests run_inference with multi-turn agent_data in eval_cases.

    Verifies that run_inference() accepts an EvaluationDataset with
    eval_cases containing agent_data (no eval_dataset_df). The agent_data
    has 2 turns: turn 0 is a completed user+agent exchange (history),
    turn 1 is a new user query. The agent should see the history and
    respond to the final query in context.
    """

    def make_event(author, role, text):
        # Build one AgentEvent whose content is a single text part.
        return types.evals.AgentEvent(
            author=author,
            content=genai_types.Content(
                role=role,
                parts=[genai_types.Part(text=text)],
            ),
        )

    agent = LlmAgent(
        name="test_agent",
        model="gemini-2.5-flash",
        instruction="You are a helpful assistant. Answer questions concisely.",
    )

    # Turn 0: completed user+agent exchange that serves as history.
    history_turn = types.evals.ConversationTurn(
        turn_index=0,
        events=[
            make_event("user", "user", "My name is Alice."),
            make_event(
                "test_agent", "model", "Hello Alice! How can I help you?"
            ),
        ],
    )
    # Turn 1: the new user query the agent must answer using the history.
    query_turn = types.evals.ConversationTurn(
        turn_index=1,
        events=[make_event("user", "user", "What is my name?")],
    )

    eval_dataset = types.EvaluationDataset(
        eval_cases=[
            types.EvalCase(
                agent_data=types.evals.AgentData(
                    turns=[history_turn, query_turn]
                )
            )
        ]
    )

    inference_result = client.evals.run_inference(
        agent=agent,
        src=eval_dataset,
    )

    # Inference must return a dataset whose dataframe carries agent_data.
    assert isinstance(inference_result, types.EvaluationDataset)
    assert inference_result.eval_dataset_df is not None
    assert "agent_data" in inference_result.eval_dataset_df.columns
96+
97+
98+
def test_inference_with_eval_cases_agent_engine_agent_data(client):
    """Tests N+1 inference with agent_data via remote Agent Engine."""

    def make_event(author, role, text):
        # Build one AgentEvent whose content is a single text part.
        return types.evals.AgentEvent(
            author=author,
            content=genai_types.Content(
                role=role,
                parts=[genai_types.Part(text=text)],
            ),
        )

    # Pre-deployed reasoning engine used as the remote agent under test.
    agent_engine = client.agent_engines.get(
        name=(
            "projects/977012026409/locations/us-central1"
            "/reasoningEngines/7188347537655332864"
        )
    )

    # Turn 0: completed user+model exchange that serves as history.
    history_turn = types.evals.ConversationTurn(
        turn_index=0,
        events=[
            make_event("user", "user", "My name is Bob."),
            make_event("model", "model", "Hi Bob! Nice to meet you."),
        ],
    )
    # Turn 1: the new user query the engine must answer using the history.
    query_turn = types.evals.ConversationTurn(
        turn_index=1,
        events=[make_event("user", "user", "What is my name?")],
    )

    eval_dataset = types.EvaluationDataset(
        eval_cases=[
            types.EvalCase(
                agent_data=types.evals.AgentData(
                    turns=[history_turn, query_turn]
                )
            )
        ]
    )

    inference_result = client.evals.run_inference(
        agent=agent_engine,
        src=eval_dataset,
    )

    # Inference must return a dataset whose dataframe carries agent_data.
    assert isinstance(inference_result, types.EvaluationDataset)
    assert inference_result.eval_dataset_df is not None
    assert "agent_data" in inference_result.eval_dataset_df.columns
153+
154+
155+
# Registers the replay-test harness for every test in this file and pins
# the replayed API surface to evals.run_inference.
pytestmark = pytest_helper.setup(
    file=__file__,
    globals_for_file=globals(),
    test_method="evals.run_inference",
)

0 commit comments

Comments
 (0)