Skip to content

Commit c12aedc

Browse files
vertex-sdk-bot authored and copybara-github committed
feat: Limit metric registry support to only custom code execution metric and llm based metric in SDK
PiperOrigin-RevId: 892623204
1 parent 1fba45b commit c12aedc

File tree

2 files changed

+5
-23
lines changed

2 files changed

+5
-23
lines changed

tests/unit/vertexai/genai/replays/test_evaluation_metric.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,12 @@
2424

2525
def test_create_and_get_evaluation_metric(client):
2626
client._api_client._http_options.api_version = "v1beta1"
27-
client._api_client._http_options.base_url = (
28-
"https://us-central1-staging-aiplatform.sandbox.googleapis.com/"
29-
)
3027
result = client.evals.create_evaluation_metric(
3128
display_name="test_metric",
3229
description="test_description",
33-
metric=types.RubricMetric.GENERAL_QUALITY,
30+
metric=types.LLMMetric(
31+
name="custom_llm_metric", prompt_template="test_prompt_template"
32+
),
3433
)
3534
assert isinstance(result, str)
3635
assert re.match(
@@ -44,9 +43,6 @@ def test_create_and_get_evaluation_metric(client):
4443

4544
def test_list_evaluation_metrics(client):
4645
client._api_client._http_options.api_version = "v1beta1"
47-
client._api_client._http_options.base_url = (
48-
"https://us-central1-staging-aiplatform.sandbox.googleapis.com/"
49-
)
5046
response = client.evals.list_evaluation_metrics()
5147
assert isinstance(response, types.ListEvaluationMetricsResponse)
5248
assert len(response.evaluation_metrics) >= 0

vertexai/_genai/_transformers.py

Lines changed: 2 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -187,22 +187,8 @@ def t_metric_for_registry(
187187
if metric_name:
188188
metric_name = metric_name.lower()
189189

190-
# Handle standard computation metrics
191-
if metric_name == "exact_match":
192-
metric_payload_item["exact_match_spec"] = {}
193-
elif metric_name == "bleu":
194-
metric_payload_item["bleu_spec"] = {}
195-
elif metric_name and metric_name.startswith("rouge"):
196-
rouge_type = metric_name.replace("_", "")
197-
metric_payload_item["rouge_spec"] = {"rouge_type": rouge_type}
198-
# API Pre-defined metrics
199-
elif metric_name and metric_name in _evals_constant.SUPPORTED_PREDEFINED_METRICS:
200-
metric_payload_item["predefined_metric_spec"] = {
201-
"metric_spec_name": metric_name,
202-
"metric_spec_parameters": metric.metric_spec_parameters,
203-
}
204190
# Custom Code Execution Metric
205-
elif hasattr(metric, "remote_custom_function") and metric.remote_custom_function:
191+
if hasattr(metric, "remote_custom_function") and metric.remote_custom_function:
206192
metric_payload_item["custom_code_execution_spec"] = {
207193
"evaluation_function": metric.remote_custom_function
208194
}
@@ -217,7 +203,7 @@ def t_metric_for_registry(
217203
"evaluation_function": metric.custom_function
218204
}
219205

220-
# Map LLM-based metrics to the new llm_based_metric_spec
206+
# LLM-based metric
221207
elif (hasattr(metric, "prompt_template") and metric.prompt_template) or (
222208
hasattr(metric, "rubric_group_name") and metric.rubric_group_name
223209
):

0 commit comments

Comments (0)