Skip to content

Commit 1c0caff

Browse files
style: run autoformat.sh (isort + pyink)
1 parent 1c68c69 commit 1c0caff

File tree

5 files changed

+13
-6
lines changed

5 files changed

+13
-6
lines changed

src/google/adk/evaluation/eval_metrics.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
from typing import Optional
2020
from typing import Union
2121

22+
from google.adk.models.base_llm import BaseLlm
2223
from google.genai import types as genai_types
2324
from pydantic import alias_generators
2425
from pydantic import BaseModel
@@ -28,8 +29,6 @@
2829
from pydantic.json_schema import SkipJsonSchema
2930
from typing_extensions import TypeAlias
3031

31-
from google.adk.models.base_llm import BaseLlm
32-
3332
from .common import EvalBaseModel
3433
from .eval_case import Invocation
3534
from .eval_rubrics import Rubric

src/google/adk/evaluation/hallucinations_v1.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -295,7 +295,9 @@ def __init__(self, eval_metric: EvalMetric):
295295
self.segmenter_prompt = _HALLUCINATIONS_V1_SEGMENTER_PROMPT
296296
self.sentence_validator_prompt = _HALLUCINATIONS_V1_VALIDATOR_PROMPT
297297
judge_model = self._judge_model_options.judge_model
298-
self._model = judge_model if isinstance(judge_model, str) else judge_model.model
298+
self._model = (
299+
judge_model if isinstance(judge_model, str) else judge_model.model
300+
)
299301
self._model_config = (
300302
self._judge_model_options.judge_model_config
301303
or genai_types.GenerateContentConfig()

src/google/adk/evaluation/llm_as_judge.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,9 @@ async def evaluate_invocations(
137137
for actual, expected in zip(actual_invocations, expected_invocations):
138138
auto_rater_prompt = self.format_auto_rater_prompt(actual, expected)
139139
judge_model = self._judge_model_options.judge_model
140-
model_str = judge_model if isinstance(judge_model, str) else judge_model.model
140+
model_str = (
141+
judge_model if isinstance(judge_model, str) else judge_model.model
142+
)
141143
llm_request = LlmRequest(
142144
model=model_str,
143145
contents=[

src/google/adk/evaluation/simulation/llm_backed_user_simulator.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,9 @@ async def _get_llm_response(
181181
)
182182

183183
config_model = self._config.model
184-
model_str = config_model if isinstance(config_model, str) else config_model.model
184+
model_str = (
185+
config_model if isinstance(config_model, str) else config_model.model
186+
)
185187
llm_request = LlmRequest(
186188
model=model_str,
187189
config=self._config.model_configuration,

src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -328,7 +328,9 @@ async def _evaluate_intermediate_turn(
328328
)
329329

330330
judge_model = self._llm_options.judge_model
331-
model_str = judge_model if isinstance(judge_model, str) else judge_model.model
331+
model_str = (
332+
judge_model if isinstance(judge_model, str) else judge_model.model
333+
)
332334
llm_request = LlmRequest(
333335
model=model_str,
334336
contents=[

0 commit comments

Comments (0)