|
| 1 | +"""Custom judge tools for adaptive evaluation criteria loading. |
| 2 | +
|
| 3 | +Implements ``ReadSkillTool`` and ``ReadSkillReferenceTool`` from the MLflow |
| 4 | +#21255 design spec, registered in MLflow's global ``JudgeToolRegistry`` so |
| 5 | +they are available to any trace-based judge. |
| 6 | +
|
| 7 | +Key difference from spec: tools accept ``trace: Trace`` (required by the |
| 8 | +``JudgeTool`` interface) but use the internal ``EvalCriteriaSet`` for skill |
| 9 | +lookup. When the native ``make_judge(skills=[...])`` API lands, replace |
| 10 | +this module with MLflow's built-in skill tools which route via type |
| 11 | +annotation. |
| 12 | +
|
| 13 | +Registry invocation flow:: |
| 14 | +
|
| 15 | + registry.invoke(tool_call, trace) |
| 16 | + → json.loads(tool_call.function.arguments) |
| 17 | + → tool.invoke(trace, **parsed_args) |
| 18 | +""" |
| 19 | + |
| 20 | +from __future__ import annotations |
| 21 | + |
| 22 | +import logging |
| 23 | +import os |
| 24 | +from typing import Any |
| 25 | + |
| 26 | +from mlflow.entities.trace import Trace |
| 27 | +from mlflow.genai.judges.tools.base import JudgeTool |
| 28 | +from mlflow.genai.judges.tools.registry import register_judge_tool |
| 29 | +from mlflow.types.llm import FunctionToolDefinition, ParamProperty, ToolDefinition, ToolParamsSchema |
| 30 | + |
| 31 | +from .eval_criteria import EvalCriteriaSet |
| 32 | + |
| 33 | +logger = logging.getLogger(__name__) |
| 34 | + |
| 35 | + |
class ReadEvalCriteriaTool(JudgeTool):
    """Read the full body of an evaluation-criteria skill.

    The judge calls this tool when a criteria's description matches the
    trace it is evaluating.
    """

    # Single source of truth for the tool name, reused by both the ``name``
    # property and ``get_definition`` so the two can never drift apart.
    _NAME = "read_eval_criteria"

    def __init__(self, criteria_set: EvalCriteriaSet):
        # Skills are looked up lazily at invoke time, so the same set can be
        # shared with ReadEvalReferenceTool.
        self._criteria_set = criteria_set

    @property
    def name(self) -> str:
        return self._NAME

    def get_definition(self) -> ToolDefinition:
        """Build the function-calling tool definition advertised to the judge.

        The currently loaded criteria names are embedded in the description so
        the judge knows what it may request without a separate listing call.
        """
        available = self._criteria_set.names
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=self._NAME,
                description=(
                    "Read the full content of an evaluation criteria skill to get domain-specific "
                    "rubrics, scoring rules, and reference material. Use this when a criteria's "
                    f"description matches the trace content. Available criteria: {available}"
                ),
                parameters=ToolParamsSchema(
                    properties={
                        "skill_name": ParamProperty(
                            type="string", description="Name of the evaluation criteria to read"
                        ),
                    },
                ),
            ),
        )

    def invoke(self, trace: Trace, skill_name: str) -> str:
        """Return the full body of the named criteria skill.

        Args:
            trace: Trace under evaluation. Required by the ``JudgeTool``
                interface but not used for the lookup (see module docstring).
            skill_name: Name of the criteria skill to read.

        Returns:
            The skill body, or an ``Error: ...`` string. Errors are returned
            rather than raised so the judge LLM sees the available names and
            can retry with a valid one.
        """
        skill = self._criteria_set.get_skill(skill_name)
        if not skill:
            available = self._criteria_set.names
            return f"Error: No criteria named '{skill_name}'. Available: {available}"
        return skill.body
| 76 | + |
| 77 | + |
class ReadEvalReferenceTool(JudgeTool):
    """Read a reference document from a criteria's ``references/`` directory.

    Used for detailed rubrics, edge cases, and scoring examples.
    """

    # Single source of truth for the tool name, reused by both the ``name``
    # property and ``get_definition`` so the two can never drift apart.
    _NAME = "read_eval_reference"

    def __init__(self, criteria_set: EvalCriteriaSet):
        self._criteria_set = criteria_set

    @property
    def name(self) -> str:
        return self._NAME

    def get_definition(self) -> ToolDefinition:
        """Build the function-calling tool definition advertised to the judge."""
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=self._NAME,
                description=(
                    "Read a reference document from an evaluation criteria skill for detailed "
                    "rubrics, edge cases, or scoring examples."
                ),
                parameters=ToolParamsSchema(
                    properties={
                        "skill_name": ParamProperty(type="string", description="Name of the evaluation criteria"),
                        "file_path": ParamProperty(
                            type="string",
                            description="Relative path within the skill (e.g., 'references/RUBRIC.md')",
                        ),
                    },
                ),
            ),
        )

    def invoke(self, trace: Trace, skill_name: str, file_path: str) -> str:
        """Return one reference document from the named criteria skill.

        Args:
            trace: Trace under evaluation. Required by the ``JudgeTool``
                interface but not used for the lookup.
            skill_name: Name of the criteria skill owning the document.
            file_path: Relative path of the document within the skill.

        Returns:
            The document contents, or an ``Error: ...`` string the judge LLM
            can act on (errors are returned, not raised).
        """
        skill = self._criteria_set.get_skill(skill_name)
        if not skill:
            available = self._criteria_set.names
            return f"Error: No criteria named '{skill_name}'. Available: {available}"
        normalized = os.path.normpath(file_path)
        # Reject absolute paths and any path that escapes the skill root.
        # Test for an exact ".." component rather than a bare
        # startswith("..") prefix, which would wrongly reject legitimate
        # names such as "..notes.md"; traversal like
        # "references/../../secret" normalizes to "../secret" and is caught.
        escapes_root = normalized == os.pardir or normalized.startswith(os.pardir + os.sep)
        if os.path.isabs(normalized) or escapes_root:
            return f"Error: Invalid file path '{file_path}'. Must be relative."
        if normalized not in skill.references:
            return f"Error: File '{file_path}' not found in '{skill_name}'"
        return skill.references[normalized]
| 122 | + |
| 123 | + |
# Process-wide guard so repeated calls cannot double-register the tools.
_registered = False


def register_eval_tools(criteria_set: EvalCriteriaSet) -> None:
    """Register eval-criteria tools in MLflow's global ``JudgeToolRegistry``.

    Safe to call multiple times — tools are registered only once per process.
    """
    global _registered
    if _registered:
        return
    skills = criteria_set.skills
    if not skills:
        # Nothing to serve; leave the flag unset so a later call with a
        # populated set can still register.
        logger.debug("No eval criteria loaded; skipping tool registration")
        return
    for tool in (ReadEvalCriteriaTool(criteria_set), ReadEvalReferenceTool(criteria_set)):
        register_judge_tool(tool)
    _registered = True
    logger.info("Registered eval criteria judge tools (%d criteria available)", len(skills))