Skip to content

Commit 7d13791

Browse files
Ninja91 authored and facebook-github-bot committed
Add a16w8 reduce_sum FVP coverage for Ethos-U85 (pytorch#19319)
Summary: Adds an a16w8 (int16 IO + int8 weights) sweep for `aten.sum.dim_IntList` reducing the last dim with `keepdim=True`. The new tests `test_sum_dim_intlist_a16w8_{u55,u85}_INT` run on the standard Corstone-300 / Corstone-320 FVP harness. The U85 case surfaces a known numerics issue in the Vela `regor` lowering at int16 IO precision (silent zero output), tracked upstream at https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela/-/issues/23. The Ethos-U55 path uses a different accumulator and is correct on the same OFM rescale. Also annotates the four `dim_None{,_4d_tensor}` parametrize ids on `test_sum_u{55,85}_INT_1_0` (and the corresponding fp16 / bf16 variants) with `skips=` -- those cases cannot be exercised through the FVP harness because `executorch.devtools.bundled_program.config` rejects `None` as a model input. The dim=None case is properly covered by the existing `SumDefault` class. Test design: - Standard `pipeline.run()` with the same a16w8 kwargs other arm a16w8 tests use (e.g. `test_native_layer_norm_16a8w_u85_INT` in `test_layer_norm.py`): `a16w8_quantization=True, symmetric_io_quantization=True, qtol=128, epsilon=2**-16`. - Numerical comparison is the standard `atol`/`rtol` check from `pipeline.run()` -- no SQNR helpers. - The U85 a16w8 test is wrapped with both `common.XfailIfNoCorstone320` (handles missing-FVP environments via `FileNotFoundError`) and `pytest.mark.xfail(strict=False, reason="...")` (handles the silent-zero bug). Both are function-level decorators that compose cleanly -- pattern matches `test_max_pool1d.py:111-114`. `strict=False` keeps the test target green both on stock Vela 5.0 (cases XFAIL) and once the upstream Vela fix is in tree (cases XPASS allowed). Reviewed By: digantdesai Differential Revision: D103667823
1 parent af90130 commit 7d13791

2 files changed

Lines changed: 73 additions & 2 deletions

File tree

backends/arm/test/ops/test_sum.py

Lines changed: 72 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,8 @@
55

66
from typing import Callable, Tuple
77

8+
import pytest
9+
810
import torch
911
from executorch.backends.arm.test import common
1012

@@ -96,7 +98,18 @@ def test_sum_dim_intlist_tosa_INT(test_data: input_t1):
9698
pipeline.run()
9799

98100

99-
@common.parametrize("test_data", Sum.test_parameters)
101+
# dim=None cases skipped: executorch.devtools.bundled_program.config rejects
# None as a model input (cannot be serialized into the bundled program).
_DIM_NONE_SKIP_REASON = "bundled_program cannot serialize None as a model input"

# Map each affected parametrize id to the shared skip reason.
_dim_none_skips = {
    case_id: _DIM_NONE_SKIP_REASON
    for case_id in ("dim_None", "dim_None_4d_tensor")
}
110+
111+
112+
@common.parametrize("test_data", Sum.test_parameters, skips=_dim_none_skips)
100113
@common.XfailIfNoCorstone300
101114
def test_sum_u55_INT_1_0(test_data: Tuple):
102115
pipeline = EthosU55PipelineINT[input_t1](
@@ -108,7 +121,7 @@ def test_sum_u55_INT_1_0(test_data: Tuple):
108121
pipeline.run()
109122

110123

111-
@common.parametrize("test_data", Sum.test_parameters)
124+
@common.parametrize("test_data", Sum.test_parameters, skips=_dim_none_skips)
112125
@common.XfailIfNoCorstone320
113126
def test_sum_u85_INT_1_0(test_data: Tuple):
114127
pipeline = EthosU85PipelineINT[input_t1](
@@ -220,3 +233,60 @@ def test_sum_tosa_FP(test_data: Callable[[], input_t2]):
220233
def test_sum_tosa_INT(test_data: Callable[[], input_t2]):
221234
pipeline = TosaPipelineINT[input_t1](SumDefault(), test_data(), SumDefault.aten_op)
222235
pipeline.run()
236+
237+
238+
# a16w8 (int16 IO + int8 weights) coverage for sum.dim_IntList. Surfaces the
# Ethos-U85 int16 ReduceSum silent-zero issue tracked upstream at
# https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela/-/issues/23.


class SumLastDim(torch.nn.Module):
    """Reduce the last dim with keepdim=True."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Sum over the trailing dimension; keepdim retains it as size 1.
        return torch.sum(x, dim=-1, keepdim=True)
248+
249+
250+
def _a16w8_case(*shape: int):
    """Return a zero-arg factory producing one fresh random input of *shape*."""
    return lambda: (torch.rand(*shape),)


# Each value is a factory so every test invocation gets a fresh random tensor.
a16w8_sum_test_parameters = {
    "rank1_16": _a16w8_case(16),
    "rank3_8x1x16": _a16w8_case(8, 1, 16),
    "rank3_4x4x16": _a16w8_case(4, 4, 16),
}
255+
256+
257+
@common.parametrize("test_data", a16w8_sum_test_parameters)
@common.XfailIfNoCorstone300
def test_sum_dim_intlist_a16w8_u55_INT(test_data: Callable[[], input_t1]):
    """Run the a16w8 last-dim ReduceSum sweep on the Corstone-300 (U55) FVP."""
    # Same a16w8 settings used by the other Arm a16w8 FVP tests:
    # int16 IO + int8 weights, symmetric IO quantization.
    quant_kwargs = dict(
        a16w8_quantization=True,
        symmetric_io_quantization=True,
        qtol=128,
        epsilon=2**-16,
    )
    pipeline = EthosU55PipelineINT[input_t1](
        SumLastDim(),
        test_data(),
        aten_op,
        exir_ops=[],
        **quant_kwargs,
    )
    pipeline.run()
271+
272+
273+
# All cases hit upstream Vela issue #23 (linked above). strict=False so the
# test target stays green both on stock Vela 5.0 (cases XFAIL) and once the
# Vela fix is in tree (cases XPASS).
@common.parametrize("test_data", a16w8_sum_test_parameters)
@common.XfailIfNoCorstone320
@pytest.mark.xfail(
    reason="Ethos-U85 int16 ReduceSum returns zero (vela#23)", strict=False
)
def test_sum_dim_intlist_a16w8_u85_INT(test_data: Callable[[], input_t1]):
    """Run the a16w8 last-dim ReduceSum sweep on the Corstone-320 (U85) FVP."""
    # Same a16w8 settings as the U55 variant; only the target pipeline differs.
    quant_kwargs = dict(
        a16w8_quantization=True,
        symmetric_io_quantization=True,
        qtol=128,
        epsilon=2**-16,
    )
    pipeline = EthosU85PipelineINT[input_t1](
        SumLastDim(),
        test_data(),
        aten_op,
        exir_ops=[],
        **quant_kwargs,
    )
    pipeline.run()

backends/arm/test/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ def define_arm_tests():
3030
"ops/test_slice.py",
3131
"ops/test_sigmoid.py",
3232
"ops/test_sub.py",
33+
"ops/test_sum.py",
3334
"ops/test_tanh.py",
3435
"ops/test_view.py",
3536
"ops/test_cos.py",

0 commit comments

Comments
 (0)