diff --git a/src/pruna/evaluation/benchmarks.py b/src/pruna/evaluation/benchmarks.py index e52ae463..e3f58164 100644 --- a/src/pruna/evaluation/benchmarks.py +++ b/src/pruna/evaluation/benchmarks.py @@ -226,7 +226,7 @@ def list(cls, task_type: str | None = None) -> list[str]: "counting, colors, position, color attributes. Evaluates fine-grained alignment " "between prompts and generated images via VQA-style questions." ), - metrics=["clip_score"], # §3.2: Mask2Former; not in Pruna + metrics=["qa_accuracy", "clip_score"], # strict QA + CLIP score task_type="text_to_image", reference="https://arxiv.org/abs/2310.11513", ), @@ -272,13 +272,30 @@ def list(cls, task_type: str | None = None) -> list[str]: reference="https://arxiv.org/abs/2504.17761", ), Benchmark( - name="OneIG", - description=( - "Omni-dimensional benchmark for text-to-image evaluation. Six dataset categories " - "(Anime_Stylization, General_Object, Knowledge_Reasoning, Multilingualism, Portrait, " - "Text_Rendering) plus fine-grained style classes. Includes alignment questions." - ), - metrics=[], # Paper uses dimension-specific metrics; not in Pruna + name="OneIG Anime Stylization", + description="OneIG subset: anime and stylized imagery.", + metrics=["oneig_alignment"], + task_type="text_to_image", + reference="https://arxiv.org/abs/2506.07977", + ), + Benchmark( + name="OneIG General Object", + description="OneIG subset: everyday objects and scenes.", + metrics=["oneig_alignment"], + task_type="text_to_image", + reference="https://arxiv.org/abs/2506.07977", + ), + Benchmark( + name="OneIG Multilingualism", + description="OneIG subset: multilingual prompts (incl. Chinese splits).", + metrics=["oneig_alignment"], + task_type="text_to_image", + reference="https://arxiv.org/abs/2506.07977", + ), + Benchmark( + name="OneIG Portrait", + description="OneIG subset: people and portraits.", + metrics=["oneig_alignment"], task_type="text_to_image", reference="https://arxiv.org/abs/2506.07977", ), diff --git a/src/pruna/evaluation/metrics/metric_oneig_alignment.py b/src/pruna/evaluation/metrics/metric_oneig_alignment.py new file mode 100644 index 00000000..ad443283 --- /dev/null +++ b/src/pruna/evaluation/metrics/metric_oneig_alignment.py @@ -0,0 +1,234 @@ +# Copyright 2025 - Pruna AI GmbH. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""OneIG alignment scoring with dependency masking (parent ``No`` gates children).""" + +from __future__ import annotations + +from typing import Any, Mapping + +import torch + +from pruna.evaluation.metrics.metric_qa_accuracy import QAAccuracyMetric +from pruna.evaluation.metrics.registry import MetricRegistry +from pruna.evaluation.metrics.utils import metric_data_processor +from pruna.evaluation.metrics.vlm_utils import _process_images + + +def _int_dict_keys(mapping: Mapping[Any, Any]) -> dict[int, Any]: + return {int(k): v for k, v in mapping.items()} + + +def _normalize_dependencies(deps: Any) -> dict[int, list[int]]: + if not isinstance(deps, Mapping): + return {} + out: dict[int, list[int]] = {} + for k, v in deps.items(): + key = int(k) + if isinstance(v, list): + out[key] = [int(p) for p in v] + else: + out[key] = [] + return out + + +def _active_oneig_question_ids(qmap: dict[int, Any]) -> list[int]: + """Question ids with real prompt text (excludes HF ``datasets`` padding and empty slots).""" + active: list[int] = [] + for qi in sorted(qmap): + text = qmap[qi] + if text is None: + continue + s = str(text).strip() + if not s or s == "None": + continue + active.append(qi) + return active + + +def apply_oneig_dependency_mask( + raw_scores: Mapping[int, float], + dependencies: Mapping[int, list[int]], +) -> dict[int, float]: + """ + Apply OneIG ``filter_score`` logic per dependency graph (single grid cell). + + Parents with semantic answer ``No`` (score ``0``) force dependent question + scores to ``0``. Parent id ``0`` is ignored, matching the reference script. + + Parameters + ---------- + raw_scores : Mapping[int, float] + Map question id → VLM score in ``{0, 1}`` (or float) before masking. + dependencies : Mapping[int, list[int]] + Map child question id → list of parent question ids (use ``[0]`` for roots). + + Returns + ------- + dict[int, float] + Copy of scores with dependent questions zeroed when any non-zero parent + scored ``0``. + """ + filtered = {int(k): float(v) for k, v in raw_scores.items()} + deps = _normalize_dependencies(dependencies) + raw = dict(filtered) + for child_id, parent_ids in deps.items(): + if child_id not in filtered: + continue + any_parent_no = False + for parent_id in parent_ids: + if parent_id == 0: + continue + if parent_id not in raw: + continue + if raw[parent_id] == 0.0: + any_parent_no = True + break + if any_parent_no: + filtered[child_id] = 0.0 + return filtered + + +def aggregate_oneig_alignment_per_cell(filtered_scores: Mapping[int, float], question_ids: list[int]) -> float: + """ + Mean filtered score over all questions in the prompt (one grid cell). + + Parameters + ---------- + filtered_scores : Mapping[int, float] + Post-mask scores for each question id. + question_ids : list[int] + Ordered ids (typically sorted ascending) defining the denominator. + + Returns + ------- + float + Average score in ``[0, 1]`` if inputs are binary; ``0.0`` if ``question_ids`` is empty. + """ + if not question_ids: + return 0.0 + s = sum(float(filtered_scores[qid]) for qid in question_ids) + return s / float(len(question_ids)) + + +@MetricRegistry.register("oneig_alignment") +class OneIGAlignmentMetric(QAAccuracyMetric): + """ + OneIG alignment with dependency-aware aggregation. 
+ + Reuses :class:`QAAccuracyMetric` VLM Yes/No scoring but aggregates like + ``OneIG-Benchmark`` ``alignment_score.py`` for a **single** grid cell (no + ``split_mxn_grid``): question ids are sorted numerically, raw scores are + masked when any non-root parent is ``No``, then the mean over all questions + is stored per image. Entries with null or blank question text (HF ``datasets`` + schema padding) are omitted from scoring. + + Numerical parity with upstream also depends on the VLM (e.g. ``openai/gpt-4o`` via + litellm vs reference Qwen2.5-VL). + + Parameters + ---------- + *args : Any + Additional positional arguments for :class:`QAAccuracyMetric`. + vlm : BaseVLM | None, optional + Custom VLM instance. If provided, ``vlm_type`` and ``model_name`` are ignored. + vlm_type : {"litellm", "transformers"}, optional + VLM backend. Default is ``"litellm"``. + model_name : str | None, optional + Litellm model id or HuggingFace checkpoint id. **Required** when ``vlm`` is not + provided (e.g. ``openai/gpt-4o``). + vlm_kwargs : dict, optional + Forwarded by ``get_vlm`` to ``LitellmVLM`` or ``TransformersVLM``. For local models, + set ``model_load_kwargs`` for ``from_pretrained``; for litellm, pass extra API options. + structured_output : bool, optional + Use structured generation (litellm pydantic; transformers outlines when applicable). + Default is True. + device : str | torch.device | None, optional + Device for transformers VLM. + api_key : str | None, optional + API key for litellm. + call_type : str, optional + Call type for the metric. + **kwargs : Any + Additional keyword arguments for :class:`QAAccuracyMetric`. + + Examples + -------- + Same ``hosted`` / ``local`` pattern as ``QAAccuracyMetric`` and + :func:`~pruna.evaluation.metrics.vlm_base.get_vlm`: + + .. code-block:: python + + import torch + + from pruna.evaluation.metrics import OneIGAlignmentMetric + + hosted = OneIGAlignmentMetric(vlm_type="litellm", model_name="openai/gpt-4o") + local = OneIGAlignmentMetric( + vlm_type="transformers", + model_name="HuggingFaceTB/SmolVLM-256M-Instruct", + device="cpu", + vlm_kwargs={"model_load_kwargs": {"torch_dtype": torch.float32}}, + ) + """ + + metric_name: str = "oneig_alignment" + metric_units: str = "alignment" + + def update(self, x: list[Any] | torch.Tensor, gt: torch.Tensor, outputs: torch.Tensor) -> None: + """ + Score each question with the VLM, apply dependency masking, append per-cell mean. + + Parameters + ---------- + x : list[Any] | torch.Tensor + Unused batch metadata (kept for metric interface). + gt : torch.Tensor + Ground-truth slot holding per-sample aux dicts with ``questions`` and + optionally ``dependencies``. + outputs : torch.Tensor + Model outputs (images) evaluated against the questions. + """ + inputs = metric_data_processor(x, gt, outputs, self.call_type) + images = _process_images(inputs[0]) + aux_list = inputs[1] if len(inputs) > 1 else [] + if isinstance(aux_list, torch.Tensor): + aux_list = aux_list.tolist() + for i, image in enumerate(images): + aux = aux_list[i] if i < len(aux_list) else {} + if not isinstance(aux, dict): + raise ValueError( + "oneig_alignment requires aux[{}] to be a dict with 'questions'. Got: {!r}.".format(i, type(aux)) + ) + qs = aux.get("questions") + if not isinstance(qs, dict) or not qs: + raise ValueError( + f"oneig_alignment requires 'questions' as a non-empty dict on aux. Got keys: {list(aux.keys())}." 
+ ) + qmap = _int_dict_keys(qs) + qids = _active_oneig_question_ids(qmap) + if not qids: + self.scores.append(0.0) + continue + question_texts = [str(qmap[qi]) for qi in qids] + deps = _normalize_dependencies(aux.get("dependencies", {})) + raw_scores_list = self.vlm.score( + [image] * len(question_texts), + question_texts, + ["Yes"] * len(question_texts), + response_format=self.response_format, + ) + raw_map = {qid: float(raw_scores_list[j]) for j, qid in enumerate(qids)} + filtered = apply_oneig_dependency_mask(raw_map, deps) + self.scores.append(aggregate_oneig_alignment_per_cell(filtered, qids)) diff --git a/src/pruna/evaluation/metrics/metric_qa_accuracy.py b/src/pruna/evaluation/metrics/metric_qa_accuracy.py new file mode 100644 index 00000000..6dd36c2f --- /dev/null +++ b/src/pruna/evaluation/metrics/metric_qa_accuracy.py @@ -0,0 +1,204 @@ +# Copyright 2025 - Pruna AI GmbH. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""QA Accuracy metric using VLM for image understanding evaluation.""" + +from __future__ import annotations + +from typing import Any, Literal + +import numpy as np +import torch + +from pruna.evaluation.metrics.registry import MetricRegistry +from pruna.evaluation.metrics.result import MetricResult +from pruna.evaluation.metrics.utils import ( + SINGLE, + metric_data_processor, +) +from pruna.evaluation.metrics.vlm_base import BaseVLM, StatefulVLMMeanScoresMetric +from pruna.evaluation.metrics.vlm_utils import VQAnswer, _process_images + + +@MetricRegistry.register("qa_accuracy") +class QAAccuracyMetric(StatefulVLMMeanScoresMetric): + """ + QA Accuracy metric. + + Uses a VLM to score yes/no alignment between each question and the generated image. + Higher scores indicate better image understanding. + + **Multiple questions** come from each auxiliary dict's ``questions`` mapping (e.g. GenEval + atomic probes, OneIG items). Each question is scored independently via :meth:`BaseVLM.score` + with expected answer ``"Yes"``. + + **Aggregation** (``aggregation`` kwarg): + + - ``mean`` (default): per image, average VLM scores over all questions; the metric's + :meth:`compute` returns the mean of those per-image values across ``update`` calls. + - ``all_or_nothing``: per image, ``1.0`` only if **every** question scores strictly above + ``0.5`` (scores equal to ``0.5`` count as failure). This matches strict GenEval-style + reporting (all atomic checks must pass per sample; see `GenEval + `_). :class:`~pruna.evaluation.task.Task` wires this for + the GenEval benchmark. + + Parameters + ---------- + *args : Any + Additional positional arguments. + vlm : BaseVLM | None, optional + Custom VLM instance. If provided, ``vlm_type`` and ``model_name`` are ignored. + vlm_type : {"litellm", "transformers"}, optional + VLM backend. Default is "litellm". + model_name : str | None, optional + Litellm model id or HuggingFace checkpoint id. **Required** when ``vlm`` is not + provided (e.g. ``openai/gpt-4o``). 
+ vlm_kwargs : dict, optional + Forwarded by ``get_vlm`` to ``LitellmVLM`` or ``TransformersVLM``. For local models, + set ``model_load_kwargs`` for ``from_pretrained``; for litellm, pass extra API options. + structured_output : bool, optional + Use structured generation (litellm pydantic; transformers outlines when applicable). + Default is True. + device : str | torch.device | None, optional + Device for transformers VLM. + api_key : str | None, optional + API key for litellm. + call_type : str, optional + Call type for the metric. + **kwargs : Any + Supports ``aggregation``: ``"mean"`` or ``"all_or_nothing"``. + + Raises + ------ + ValueError + If ``aggregation`` is not ``"mean"`` or ``"all_or_nothing"``. + + Examples + -------- + Same ``hosted`` / ``local`` pattern as :func:`~pruna.evaluation.metrics.vlm_base.get_vlm`: + + .. code-block:: python + + import torch + + from pruna.evaluation.metrics import QAAccuracyMetric + + hosted = QAAccuracyMetric(vlm_type="litellm", model_name="openai/gpt-4o") + local = QAAccuracyMetric( + vlm_type="transformers", + model_name="HuggingFaceTB/SmolVLM-256M-Instruct", + device="cpu", + vlm_kwargs={"model_load_kwargs": {"torch_dtype": torch.float32}}, + ) + """ + + scores: list[float] + default_call_type: str = "y_gt" + higher_is_better: bool = True + metric_units: str = "accuracy" + metric_name: str = "qa_accuracy" + + def __init__( + self, + *args, + vlm: BaseVLM | None = None, + vlm_type: Literal["litellm", "transformers"] = "litellm", + model_name: str | None = None, + vlm_kwargs: dict | None = None, + structured_output: bool = True, + device: str | torch.device | None = None, + api_key: str | None = None, + call_type: str = SINGLE, + **kwargs: Any, + ) -> None: + super().__init__(device=device) + self.response_format = VQAnswer if structured_output else None + self.aggregation = kwargs.pop("aggregation", "mean") + if self.aggregation not in {"mean", "all_or_nothing"}: + raise ValueError( + f"qa_accuracy aggregation must be one of {{'mean', 'all_or_nothing'}}. Got: {self.aggregation!r}." + ) + self.metric_units = type(self).metric_units + self._init_vlm_scores( + vlm=vlm, + vlm_type=vlm_type, + model_name=model_name, + vlm_kwargs=vlm_kwargs, + structured_output=structured_output, + device=device, + api_key=api_key, + call_type=call_type, + ) + + def _extract_questions(self, gt: Any, n: int) -> list[list[str]]: + if isinstance(gt, (list, tuple)) and len(gt) >= n: + out = [] + for i in range(n): + v = gt[i] + if isinstance(v, dict) and "questions" in v: + qs = v["questions"] + out.append(list(qs.values()) if isinstance(qs, dict) else list(qs)) + else: + out.append([]) + return out + return [[] for _ in range(n)] + + def update(self, x: list[Any] | torch.Tensor, gt: torch.Tensor, outputs: torch.Tensor) -> None: + """ + Update the metric with new batch data. + + Parameters + ---------- + x : list[Any] | torch.Tensor + The input data. + gt : torch.Tensor + The ground truth (questions per image). + outputs : torch.Tensor + The output images. + """ + inputs = metric_data_processor(x, gt, outputs, self.call_type) + images = _process_images(inputs[0]) + auxiliaries = inputs[1] if len(inputs) > 1 else [] + questions_per_image = self._extract_questions(auxiliaries, len(images)) + for i, image in enumerate(images): + questions = questions_per_image[i] if i < len(questions_per_image) else [] + if not questions: + aux = auxiliaries[i] if i < len(auxiliaries) else {} + raise ValueError( + "qa_accuracy requires 'questions' in auxiliaries. 
" + "Use a benchmark that provides it (e.g. GenEval, DPG, OneIG). " + f"Got aux keys: {list(aux.keys()) if isinstance(aux, dict) else 'not a dict'}." + ) + scores = self.vlm.score( + [image] * len(questions), + questions, + ["Yes"] * len(questions), + response_format=self.response_format, + ) + if self.aggregation == "all_or_nothing": + score = 1.0 if all(s > 0.5 for s in scores) else 0.0 + else: + score = float(np.mean(scores)) + self.scores.append(score) + + def compute(self) -> MetricResult: + """ + Compute the QA accuracy score. + + Returns + ------- + MetricResult + The mean QA accuracy across all updates. + """ + return self.compute_mean_of_scores() diff --git a/tests/evaluation/test_text_metrics.py b/tests/evaluation/test_text_metrics.py new file mode 100644 index 00000000..12705e91 --- /dev/null +++ b/tests/evaluation/test_text_metrics.py @@ -0,0 +1,136 @@ +"""Tests for OneIG alignment masking and wiring.""" + +from unittest.mock import MagicMock + +import pytest +import torch + +from pruna.data.datasets.prompt import _to_oneig_record +from pruna.evaluation.metrics.metric_oneig_alignment import ( + OneIGAlignmentMetric, + _active_oneig_question_ids, + aggregate_oneig_alignment_per_cell, + apply_oneig_dependency_mask, +) +from pruna.evaluation.metrics.vlm_base import BaseVLM + + +def test_apply_oneig_dependency_mask_parent_no_zeros_child() -> None: + """Parent ``No`` forces dependent question score to zero.""" + raw = {1: 0.0, 2: 1.0} + deps = {1: [0], 2: [1]} + out = apply_oneig_dependency_mask(raw, deps) + assert out[1] == 0.0 + assert out[2] == 0.0 + assert aggregate_oneig_alignment_per_cell(out, [1, 2]) == 0.0 + + +def test_apply_oneig_dependency_mask_parent_yes_keeps_child() -> None: + """All ``Yes`` yields nonzero child and mean 1.0 over two questions.""" + raw = {1: 1.0, 2: 1.0} + deps = {1: [0], 2: [1]} + out = apply_oneig_dependency_mask(raw, deps) + assert out == {1: 1.0, 2: 1.0} + assert aggregate_oneig_alignment_per_cell(out, [1, 2]) == 1.0 + + +def test_apply_oneig_dependency_mask_uses_raw_parent_not_filtered_for_chain() -> None: + r"""Grandchild may stay 1 when parent's **raw** VLM score is Yes even if parent was masked to 0.""" + raw = {1: 0.0, 2: 1.0, 3: 1.0} + deps = {1: [0], 2: [1], 3: [2]} + out = apply_oneig_dependency_mask(raw, deps) + assert out[1] == 0.0 + assert out[2] == 0.0 + assert out[3] == 1.0 + + +def test_apply_oneig_dependency_mask_grandchild_chain() -> None: + """3-level chain: grandparent No masks parent; grandchild uses raw parent (stays 1.0).""" + raw_scores = {1: 0.0, 2: 1.0, 3: 1.0} + dependencies = {2: [1], 3: [2]} + filtered = apply_oneig_dependency_mask(raw_scores, dependencies) + assert filtered[2] == 0.0 + assert filtered[3] == 1.0 + assert filtered[1] == 0.0 + + +def test_active_oneig_question_ids_skips_padding() -> None: + """Padded ``None`` and blank slots are excluded; numeric order preserved.""" + qmap = {1: "a", 21: None, 3: " ", 2: "b"} + assert _active_oneig_question_ids(qmap) == [1, 2] + + +def test_active_oneig_question_ids_skips_literal_none_string() -> None: + r"""The literal ``\"None\"`` string is treated as a missing label (legacy / bad rows).""" + assert _active_oneig_question_ids({1: "None", 2: "ok"}) == [2] + + +@pytest.mark.cpu +def test_oneig_alignment_metric_respects_question_id_order() -> None: + """Questions are scored in numeric id order; masking uses aligned raw scores.""" + mock_vlm = MagicMock(spec=BaseVLM) + mock_vlm.score.return_value = [0.0, 1.0] + + metric = OneIGAlignmentMetric(vlm=mock_vlm, 
vlm_type="litellm", device="cpu") + images = torch.rand(1, 3, 64, 64) + aux = { + "questions": {"2": "second", "1": "first"}, + "dependencies": {"1": [0], "2": [1]}, + } + metric.update(["p"], [aux], images) + result = metric.compute() + assert result.name == "oneig_alignment" + assert result.higher_is_better is True + assert result.metric_units == "alignment" + assert result.result == 0.0 + call = mock_vlm.score.call_args + assert call[0][1] == ["first", "second"] + + +@pytest.mark.cpu +def test_oneig_alignment_skips_none_question_texts() -> None: + """HF ``datasets`` schema padding (``None`` question text) is not sent to the VLM.""" + mock_vlm = MagicMock(spec=BaseVLM) + mock_vlm.score.return_value = [1.0] + + metric = OneIGAlignmentMetric(vlm=mock_vlm, vlm_type="litellm", device="cpu") + images = torch.rand(1, 3, 64, 64) + aux = { + "questions": {"1": "first", "21": None}, + "dependencies": {"1": [0], "21": [0]}, + } + metric.update(["p"], [aux], images) + result = metric.compute() + assert result.name == "oneig_alignment" + assert result.result == 1.0 + mock_vlm.score.assert_called_once() + assert mock_vlm.score.call_args[0][1] == ["first"] + + +@pytest.mark.cpu +def test_oneig_alignment_all_padding_questions_yields_zero_without_vlm() -> None: + """When every slot is padding, score is 0.0 and the VLM is not called.""" + mock_vlm = MagicMock(spec=BaseVLM) + metric = OneIGAlignmentMetric(vlm=mock_vlm, vlm_type="litellm", device="cpu") + aux = {"questions": {"1": None, "2": None}, "dependencies": {}} + metric.update(["p"], [aux], torch.rand(1, 3, 64, 64)) + assert metric.compute().result == 0.0 + mock_vlm.score.assert_not_called() + + +def test_to_oneig_record_strips_null_questions_and_dependencies() -> None: + """Null-valued Q_D entries are filtered out at record construction time.""" + row = {"category": "Anime_Stylization", "id": "001", "class": "None", "prompt_en": "a cat"} + questions_by_key = { + "anime_001": { + "questions": {"1": "Is there a cat?", "21": None}, + "dependencies": {"1": [0], "21": None}, + } + } + record = _to_oneig_record(row, questions_by_key, {}, {}) + assert "21" not in record["questions"] + assert "21" not in record["dependencies"] + assert record["questions"] == {"1": "Is there a cat?"} + assert record["dependencies"] == {"1": [0]} + +