Skip to content

Commit 64e5ae1

Browse files
akshatsinha0pre-commit-ci[bot]ericspod
authored
feat(metrics): Add MAPEMetric for regression evaluation. (#8686)
Added a useful regression metric. ### Description A few sentences describing the changes proposed in this pull request. ### Types of changes <!--- Put an `x` in all the boxes that apply, and remove the not applicable items --> - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. - [x] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. - [x] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. --------- Signed-off-by: Akshat Sinha <akshatsinhasramhardy@gmail.com> Signed-off-by: Akshat Sinha Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com>
1 parent 2be2bed commit 64e5ae1

File tree

4 files changed

+71
-10
lines changed

4 files changed

+71
-10
lines changed

docs/source/metrics.rst

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,11 @@ Metrics
143143
.. autoclass:: PSNRMetric
144144
:members:
145145

146+
`Mean absolute percentage error`
147+
---------------------------------
148+
.. autoclass:: MAPEMetric
149+
:members:
150+
146151
`Structural similarity index measure`
147152
-------------------------------------
148153
.. autoclass:: monai.metrics.regression.SSIMMetric

monai/metrics/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
from .panoptic_quality import PanopticQualityMetric, compute_panoptic_quality
2929
from .regression import (
3030
MAEMetric,
31+
MAPEMetric,
3132
MSEMetric,
3233
MultiScaleSSIMMetric,
3334
PSNRMetric,

monai/metrics/regression.py

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,39 @@ def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor
143143
return compute_mean_error_metrics(y_pred, y, func=self.abs_func)
144144

145145

146+
class MAPEMetric(RegressionMetric):
    r"""Compute Mean Absolute Percentage Error between two tensors using function:

    .. math::
        \operatorname {MAPE}\left(Y, \hat{Y}\right) =\frac {100}{n}\sum _{i=1}^{n}\left|\frac{y_i-\hat{y}_i}{y_i}\right|.

    More info: https://en.wikipedia.org/wiki/Mean_absolute_percentage_error

    Input `y_pred` is compared with ground truth `y`.
    Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
    Note: to keep the metric defined for zero-valued targets, the denominator is bounded below by a tiny
    epsilon value.

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
        epsilon: small positive float bounding the denominator away from zero. Defaults to 1e-7.

    """

    def __init__(
        self, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False, epsilon: float = 1e-7
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # kept on the instance and forwarded to the helper on every metric call
        self.epsilon = epsilon

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # delegate the per-batch computation to the module-level helper function
        return compute_mape_metric(y_pred, y, epsilon=self.epsilon)
178+
146179
class RMSEMetric(RegressionMetric):
147180
r"""Compute Root Mean Squared Error between two tensors using function:
148181
@@ -220,6 +253,23 @@ def compute_mean_error_metrics(y_pred: torch.Tensor, y: torch.Tensor, func: Call
220253
return torch.mean(flt(func(y - y_pred)), dim=-1, keepdim=True)
221254

222255

256+
def compute_mape_metric(y_pred: torch.Tensor, y: torch.Tensor, epsilon: float = 1e-7) -> torch.Tensor:
    """
    Compute Mean Absolute Percentage Error between `y_pred` and `y`.

    The absolute error is divided by the magnitude of the ground truth (bounded
    below by `epsilon` so zero targets do not produce inf/nan) and scaled to a
    percentage, then averaged over all non-batch dimensions.

    Args:
        y_pred: predicted values
        y: ground truth values
        epsilon: small value to avoid division by zero

    Returns:
        MAPE value as percentage, one value per batch item (shape ``(batch, 1)``)
    """
    absolute_error = torch.abs(y - y_pred)
    # |y| clamped from below so exactly-zero targets keep the metric finite
    denominator = torch.abs(y).clamp(min=epsilon)
    percentage_error = absolute_error / denominator * 100.0
    return percentage_error.flatten(start_dim=1).mean(dim=-1, keepdim=True)
271+
272+
223273
class KernelType(StrEnum):
224274
GAUSSIAN = "gaussian"
225275
UNIFORM = "uniform"

tests/metrics/test_compute_regression_metrics.py

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
import numpy as np
1818
import torch
1919

20-
from monai.metrics import MAEMetric, MSEMetric, PSNRMetric, RMSEMetric
20+
from monai.metrics import MAEMetric, MAPEMetric, MSEMetric, PSNRMetric, RMSEMetric
2121
from monai.utils import set_determinism
2222

2323

@@ -44,14 +44,19 @@ def psnrmetric_np(max_val, y_pred, y):
4444
return np.mean(20 * np.log10(max_val) - 10 * np.log10(mse))
4545

4646

47+
def mapemetric_np(y_pred, y, epsilon=1e-7):
    # numpy reference implementation mirroring monai.metrics.compute_mape_metric:
    # |y| is bounded below by epsilon so zero-valued targets stay finite
    bounded_truth = np.clip(np.abs(y), a_min=epsilon, a_max=None)
    return np.mean(flatten(np.abs(y - y_pred) / bounded_truth * 100.0))
50+
51+
4752
class TestRegressionMetrics(unittest.TestCase):
4853

4954
def test_shape_reduction(self):
5055
set_determinism(seed=123)
5156
device = "cuda" if torch.cuda.is_available() else "cpu"
5257

5358
# regression metrics to check
54-
metrics = [MSEMetric, MAEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
59+
metrics = [MSEMetric, MAEMetric, MAPEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
5560

5661
# define variations in batch/base_dims/spatial_dims
5762
batch_dims = [1, 2, 4, 16]
@@ -94,8 +99,8 @@ def test_compare_numpy(self):
9499
device = "cuda" if torch.cuda.is_available() else "cpu"
95100

96101
# regression metrics to check + truth metric function in numpy
97-
metrics = [MSEMetric, MAEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
98-
metrics_np = [msemetric_np, maemetric_np, rmsemetric_np, partial(psnrmetric_np, max_val=1.0)]
102+
metrics = [MSEMetric, MAEMetric, MAPEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
103+
metrics_np = [msemetric_np, maemetric_np, mapemetric_np, rmsemetric_np, partial(psnrmetric_np, max_val=1.0)]
99104

100105
# define variations in batch/base_dims/spatial_dims
101106
batch_dims = [1, 2, 4, 16]
@@ -117,14 +122,14 @@ def test_compare_numpy(self):
117122
out_tensor = mt.aggregate(reduction="mean")
118123
out_np = mt_fn_np(y_pred=in_tensor_a.cpu().numpy(), y=in_tensor_b.cpu().numpy())
119124

120-
np.testing.assert_allclose(out_tensor.cpu().numpy(), out_np, atol=1e-4)
125+
np.testing.assert_allclose(out_tensor.cpu().numpy(), out_np, atol=1e-3, rtol=1e-4)
121126

122127
def test_ill_shape(self):
123128
set_determinism(seed=123)
124129
device = "cuda" if torch.cuda.is_available() else "cpu"
125130

126131
# regression metrics to check + truth metric function in numpy
127-
metrics = [MSEMetric, MAEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
132+
metrics = [MSEMetric, MAEMetric, MAPEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
128133
basedim = 10
129134

130135
# too small shape
@@ -143,8 +148,8 @@ def test_ill_shape(self):
143148
def test_same_input(self):
144149
set_determinism(seed=123)
145150
device = "cuda" if torch.cuda.is_available() else "cpu"
146-
metrics = [MSEMetric, MAEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
147-
results = [0.0, 0.0, 0.0, float("inf")]
151+
metrics = [MSEMetric, MAEMetric, MAPEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
152+
results = [0.0, 0.0, 0.0, 0.0, float("inf")]
148153

149154
# define variations in batch/base_dims/spatial_dims
150155
batch_dims = [1, 2, 4, 16]
@@ -168,8 +173,8 @@ def test_same_input(self):
168173
def test_diff_input(self):
169174
set_determinism(seed=123)
170175
device = "cuda" if torch.cuda.is_available() else "cpu"
171-
metrics = [MSEMetric, MAEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
172-
results = [1.0, 1.0, 1.0, 0.0]
176+
metrics = [MSEMetric, MAEMetric, MAPEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]
177+
results = [1.0, 1.0, 100.0, 1.0, 0.0]
173178

174179
# define variations in batch/base_dims/spatial_dims
175180
batch_dims = [1, 2, 4, 16]

0 commit comments

Comments
 (0)