Skip to content

Commit 427bc4e

Browse files
authored
Arm backend: Update Vela to 5.0.0 (#17839)
- Remove the now-deprecated `--debug-force-regor` flag from docs
- Remove some now-passing xfails

Signed-off-by: Erik Lundell <erik.lundell@arm.com>
1 parent 1619308 commit 427bc4e

11 files changed

Lines changed: 22 additions & 72 deletions

File tree

backends/arm/arm_vela.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -88,12 +88,7 @@ def run(dir: str) -> bytes:
8888
args.append("--verbose-all")
8989
vela.main(" ".join(args).split(" "))
9090

91-
if any("ethos-u85" in arg for arg in args) or any(
92-
"debug-force-regor" in arg for arg in args
93-
):
94-
np_path = os.path.join(dir, "output", "out_vela.npz")
95-
else:
96-
np_path = os.path.join(dir, "output", "out_sg0_vela.npz")
91+
np_path = os.path.join(dir, "output", "out_vela.npz")
9792

9893
blocks = b""
9994
with np.load(np_path, allow_pickle=False) as data:

backends/arm/ethosu/compile_spec.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,6 @@ def _build_compiler_flags(
7575
f"--accelerator-config={target}",
7676
f"--config={config_ini}",
7777
"--output-format=raw",
78-
"--debug-force-regor",
7978
f"--system-config={system_config}",
8079
f"--memory-mode={memory_mode}",
8180
]
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
1-
# Copyright 2025 Arm Limited and/or its affiliates.
1+
# Copyright 2025-2026 Arm Limited and/or its affiliates.
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
55

6-
ethos-u-vela == 4.5.0
6+
ethos-u-vela == 5.0.0
77
pte-adapter-model-explorer == 0.0.2

backends/arm/test/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ def get_u55_compile_spec(
8686
macs: int = 128,
8787
system_config: str = "Ethos_U55_High_End_Embedded",
8888
memory_mode: str = "Shared_Sram",
89-
extra_flags: str = "--debug-force-regor --output-format=raw --arena-cache-size=2097152",
89+
extra_flags: str = "--arena-cache-size=2097152",
9090
custom_path: Optional[str] = None,
9191
config: Optional[str] = None,
9292
tosa_debug_mode: EthosUCompileSpec.DebugMode | None = None,

backends/arm/test/ops/test_bitwise.py

Lines changed: 6 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,8 @@
44
# LICENSE file in the root directory of this source tree.
55

66

7-
from copy import copy
87
from typing import Tuple
98

10-
import pytest
119
import torch
1210
from executorch.backends.arm.test import common
1311
from executorch.backends.arm.test.tester.test_pipeline import (
@@ -62,9 +60,6 @@ class BitwiseBinary(torch.nn.Module):
6260

6361
test_data = {**test_data_non_bool, **test_data_bool}
6462

65-
test_data_u85 = copy(test_data)
66-
del test_data_u85["zeros"]
67-
6863

6964
class BitwiseBinaryScalar(torch.nn.Module):
7065
test_data_non_bool = {
@@ -95,9 +90,6 @@ class BitwiseBinaryScalar(torch.nn.Module):
9590

9691
test_data = {**test_data_non_bool, **test_data_bool}
9792

98-
test_data_u85 = copy(test_data)
99-
del test_data_u85["zeros"]
100-
10193

10294
class And(BitwiseBinary):
10395
aten_op = "torch.ops.aten.bitwise_and.Tensor"
@@ -269,7 +261,7 @@ def test_bitwise_and_scalar_u55_INT(test_data: input_t2):
269261
pipeline.run()
270262

271263

272-
@common.parametrize("test_data", AndScalar.test_data_u85)
264+
@common.parametrize("test_data", AndScalar.test_data)
273265
@common.XfailIfNoCorstone320
274266
def test_bitwise_and_scalar_u85_INT(test_data: input_t2):
275267
pipeline = EthosU85PipelineINT[input_t2](
@@ -284,7 +276,7 @@ def test_bitwise_and_scalar_u85_INT(test_data: input_t2):
284276
pipeline.run()
285277

286278

287-
@common.parametrize("test_data", And().test_data_u85)
279+
@common.parametrize("test_data", And().test_data)
288280
@common.XfailIfNoCorstone320
289281
def test_bitwise_and_tensor_u85_INT(test_data: input_t2):
290282
pipeline = EthosU85PipelineINT[input_t2](
@@ -476,7 +468,7 @@ def test_bitwise_xor_scalar_u55_INT(test_data: input_t2):
476468
pipeline.run()
477469

478470

479-
@common.parametrize("test_data", Xor().test_data_u85)
471+
@common.parametrize("test_data", Xor().test_data)
480472
@common.XfailIfNoCorstone320
481473
def test_bitwise_xor_tensor_u85_INT(test_data: input_t2):
482474
pipeline = EthosU85PipelineINT[input_t2](
@@ -491,7 +483,7 @@ def test_bitwise_xor_tensor_u85_INT(test_data: input_t2):
491483
pipeline.run()
492484

493485

494-
@common.parametrize("test_data", XorScalar.test_data_u85)
486+
@common.parametrize("test_data", XorScalar.test_data)
495487
@common.XfailIfNoCorstone320
496488
def test_bitwise_xor_scalar_u85_INT(test_data: input_t2):
497489
pipeline = EthosU85PipelineINT[input_t2](
@@ -683,7 +675,7 @@ def test_bitwise_or_scalar_u55_INT(test_data: input_t2):
683675
pipeline.run()
684676

685677

686-
@common.parametrize("test_data", Or().test_data_u85)
678+
@common.parametrize("test_data", Or().test_data)
687679
@common.XfailIfNoCorstone320
688680
def test_bitwise_or_tensor_u85_INT(test_data: input_t2):
689681
pipeline = EthosU85PipelineINT[input_t2](
@@ -698,7 +690,7 @@ def test_bitwise_or_tensor_u85_INT(test_data: input_t2):
698690
pipeline.run()
699691

700692

701-
@common.parametrize("test_data", OrScalar.test_data_u85)
693+
@common.parametrize("test_data", OrScalar.test_data)
702694
@common.XfailIfNoCorstone320
703695
def test_bitwise_or_scalar_u85_INT(test_data: input_t2):
704696
pipeline = EthosU85PipelineINT[input_t2](
@@ -775,30 +767,3 @@ def test_bitwise_or_scalar_vgf_quant(test_data: input_t2):
775767
quantize=True,
776768
)
777769
pipeline.run()
778-
779-
780-
@pytest.mark.xfail(
781-
reason="MLBEDSW-11029: Fatal Python floating point error in Vela for rank 4 bitwse ops with int32 dtype."
782-
)
783-
def test_bitwise_or_tensor_u85_INT_zeros():
784-
raise RuntimeError(
785-
"Dummy test to xfail mark u85 zeros test case since running the actual test causes a fatal crash."
786-
)
787-
788-
789-
@pytest.mark.xfail(
790-
reason="MLBEDSW-11029: Fatal Python floating point error in Vela for rank 4 bitwse ops with int32 dtype."
791-
)
792-
def test_bitwise_and_tensor_u85_INT_zeros():
793-
raise RuntimeError(
794-
"Dummy test to xfail mark u85 zeros test case since running the actual test causes a fatal crash."
795-
)
796-
797-
798-
@pytest.mark.xfail(
799-
reason="MLBEDSW-11029: Fatal Python floating point error in Vela for rank 4 bitwse ops with int32 dtype."
800-
)
801-
def test_bitwise_xor_tensor_u85_INT_zeros():
802-
raise RuntimeError(
803-
"Dummy test to xfail mark u85 zeros test case since running the actual test causes a fatal crash."
804-
)

backends/arm/test/ops/test_cond.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -289,12 +289,6 @@ def test_cond_u55_INT(case: Callable[[], tuple[torch.nn.Module, tuple]]):
289289
xfails={
290290
"nested_one_arg_one_output": "Node submodule_0 target submodule_0 references nonexistent attribute submodule_0",
291291
},
292-
skips={
293-
"one_arg_one_output": "Segfault when transpose goes into cond. MLBEDSW-11416.",
294-
"one_arg_const_one_output": "Segfault when transpose goes into cond. MLBEDSW-11416.",
295-
"multiple_one_arg_one_output": "Segfault when transpose goes into cond. MLBEDSW-11416.",
296-
"one_arg_and_scalar_one_output": "Segfault when transpose goes into cond. MLBEDSW-11416.",
297-
},
298292
)
299293
@common.XfailIfNoCorstone320.with_args(raises=None)
300294
def test_cond_u85_INT(case: Callable[[], tuple[torch.nn.Module, tuple]]):

backends/arm/test/ops/test_pixel_shuffling.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2025 Arm Limited and/or its affiliates.
1+
# Copyright 2025-2026 Arm Limited and/or its affiliates.
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
@@ -193,7 +193,6 @@ def test_pixel_unshuffle_u55_INT(test_data: input_t1):
193193
@common.parametrize(
194194
"test_data",
195195
PixelUnShuffle.test_data_generators,
196-
xfails={"rand_4d": "MLETORCH-1424: rand test fails"},
197196
)
198197
@common.XfailIfNoCorstone320
199198
def test_pixel_unshuffle_u85_INT(test_data: input_t1):
@@ -223,7 +222,6 @@ def test_pixel_shuffle_u55_INT(test_data: input_t1):
223222
@common.parametrize(
224223
"test_data",
225224
PixelShuffle.test_data_generators,
226-
xfails={"rand_4d": "MLETORCH-1424: rand test fails"},
227225
)
228226
@common.XfailIfNoCorstone320
229227
def test_pixel_shuffle_u85_INT(test_data: input_t1):

backends/arm/test/ops/test_unary_combos.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,9 @@
1-
# Copyright 2025 Arm Limited and/or its affiliates.
1+
# Copyright 2025-2026 Arm Limited and/or its affiliates.
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
55
from typing import Tuple
66

7-
import pytest
8-
97
import torch
108
from executorch.backends.arm.test import common
119
from executorch.backends.arm.test.tester.test_pipeline import (
@@ -83,29 +81,32 @@ def forward(self, x):
8381

8482

8583
MODELS = [NegAdd, AbsAdd, MaxAddZero, MinAddZero]
84+
MODEL_DATA = {model.__name__: model for model in MODELS}
8685

8786

8887
def _build(model_cls):
8988
m = model_cls()
9089
return m, m.get_inputs(), model_cls.edge_op_list
9190

9291

93-
@pytest.mark.parametrize("model_cls", MODELS, ids=lambda c: c.__name__)
92+
@common.parametrize("model_cls", MODEL_DATA)
9493
def test_add_tensor_tosa_FP_combos(model_cls):
9594
m, inputs, exir = _build(model_cls)
9695
p = TosaPipelineFP[Tensor1](m, inputs, aten_op=[], exir_op=exir)
9796
p.run()
9897

9998

100-
@pytest.mark.parametrize("model_cls", MODELS, ids=lambda c: c.__name__)
99+
@common.parametrize("model_cls", MODEL_DATA)
101100
def test_add_tensor_tosa_INT_combos(model_cls):
102101
m, inputs, exir = _build(model_cls)
103102
p = TosaPipelineINT[Tensor1](m, inputs, aten_op=[], exir_op=exir, qtol=1)
104103
p.run()
105104

106105

107106
@common.XfailIfNoCorstone300
108-
@pytest.mark.parametrize("model_cls", MODELS, ids=lambda c: c.__name__)
107+
@common.parametrize(
108+
"model_cls", MODEL_DATA, xfails={"NegAdd": "Numerical failure. MLBEDSW-11581"}
109+
)
109110
def test_add_tensor_u55_INT_combos(model_cls):
110111
m, inputs, exir = _build(model_cls)
111112
p = EthosU55PipelineINT[Tensor1](
@@ -118,7 +119,7 @@ def test_add_tensor_u55_INT_combos(model_cls):
118119

119120

120121
@common.XfailIfNoCorstone320
121-
@pytest.mark.parametrize("model_cls", MODELS, ids=lambda c: c.__name__)
122+
@common.parametrize("model_cls", MODEL_DATA)
122123
def test_add_tensor_u85_INT_combos(model_cls):
123124
m, inputs, exir = _build(model_cls)
124125
p = EthosU85PipelineINT[Tensor1](
@@ -131,7 +132,7 @@ def test_add_tensor_u85_INT_combos(model_cls):
131132

132133

133134
@common.SkipIfNoModelConverter
134-
@pytest.mark.parametrize("model_cls", MODELS, ids=lambda c: c.__name__)
135+
@common.parametrize("model_cls", MODEL_DATA)
135136
def test_add_tensor_vgf_quant_combos(model_cls):
136137
m, inputs, exir = _build(model_cls)
137138
p = VgfPipeline[Tensor1](

docs/source/backends/arm-ethos-u/tutorials/ethos-u-getting-started.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,6 @@ compile_spec = EthosUCompileSpec(
9595
target="ethos-u55-128",
9696
system_config="Ethos_U55_High_End_Embedded",
9797
memory_mode="Shared_Sram",
98-
extra_flags=["--output-format=raw", "--debug-force-regor"]
9998
)
10099

101100
# Create and configure quantizer to use a symmetric quantization config globally on all nodes

examples/arm/ethos_u_minimal_example.ipynb

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,6 @@
9494
" target=\"ethos-u55-128\",\n",
9595
" system_config=\"Ethos_U55_High_End_Embedded\",\n",
9696
" memory_mode=\"Shared_Sram\",\n",
97-
" extra_flags=[\"--output-format=raw\", \"--debug-force-regor\"]\n",
9897
" )\n",
9998
"\n",
10099
"# Create and configure quantizer to use a symmetric quantization config globally on all nodes\n",

0 commit comments

Comments (0)