Skip to content

Commit 08b2987

Browse files
committed
Arm backend: Generate random op test inputs lazily
Several Arm operator tests were creating random inputs at module import time. The Arm test seed is applied later by an autouse pytest fixture, so those tensors were not actually controlled by ARM_TEST_SEED. That made tests nondeterministic across fresh pytest processes and could expose different quantization behavior from run to run. Generate the affected inputs lazily inside each test case so the existing seed fixture makes them reproducible and ARM_TEST_SEED=RANDOM can rerandomize the intended data. Signed-off-by: Per Held <per.held@arm.com> Change-Id: Ic4414da5e84b7fb19275e04399634289b10a0a19
1 parent 8a397b4 commit 08b2987

19 files changed

Lines changed: 433 additions & 329 deletions

backends/arm/test/ops/test_addmm.py

Lines changed: 40 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -28,73 +28,91 @@
2828

2929

3030
test_data_suite = {
31-
"basic": [
31+
"basic": lambda: [
3232
torch.tensor([[1.0, 2.0], [3.0, 4.0]]),
3333
torch.tensor([[1.0, 0.0], [0.0, 1.0]]),
3434
torch.tensor([[1.0, 2.0], [3.0, 4.0]]),
3535
1.0,
3636
1.0,
3737
],
38-
"zeros": [torch.zeros(2, 2), torch.zeros(2, 3), torch.zeros(3, 2), 1.0, 1.0],
39-
"beta_only": [
38+
"zeros": lambda: [
39+
torch.zeros(2, 2),
40+
torch.zeros(2, 3),
41+
torch.zeros(3, 2),
42+
1.0,
43+
1.0,
44+
],
45+
"beta_only": lambda: [
4046
torch.tensor([[10.0, 20.0], [30.0, 40.0]]),
4147
torch.randn(2, 3),
4248
torch.randn(3, 2),
4349
0.0,
4450
1.0,
4551
],
46-
"alpha_only": [
52+
"alpha_only": lambda: [
4753
torch.tensor([[10.0, 20.0], [30.0, 40.0]]),
4854
torch.randn(2, 3),
4955
torch.randn(3, 2),
5056
1.0,
5157
0.0,
5258
],
53-
"scaled": [
59+
"scaled": lambda: [
5460
torch.ones(2, 2),
5561
torch.tensor([[1.0, 2.0], [3.0, 4.0]]),
5662
torch.tensor([[5.0, 6.0], [7.0, 8.0]]),
5763
0.5,
5864
2.0,
5965
],
60-
"negative_scalars": [
66+
"negative_scalars": lambda: [
6167
torch.tensor([[1.0, -1.0], [-1.0, 1.0]]),
6268
torch.tensor([[2.0, 0.0], [0.0, 2.0]]),
6369
torch.tensor([[1.0, 1.0], [1.0, 1.0]]),
6470
-1.0,
6571
-1.0,
6672
],
67-
"non_square": [torch.ones(3, 4), torch.rand(3, 2), torch.rand(2, 4), 1.0, 1.0],
68-
"large_values": [
73+
"non_square": lambda: [
74+
torch.ones(3, 4),
75+
torch.rand(3, 2),
76+
torch.rand(2, 4),
77+
1.0,
78+
1.0,
79+
],
80+
"large_values": lambda: [
6981
torch.full((2, 2), 1e6),
7082
torch.full((2, 3), 1e3),
7183
torch.full((3, 2), 1e3),
7284
1.0,
7385
1.0,
7486
],
75-
"small_values": [
87+
"small_values": lambda: [
7688
torch.full((2, 2), 1e-6),
7789
torch.full((2, 3), 1e-3),
7890
torch.full((3, 2), 1e-3),
7991
1.0,
8092
1.0,
8193
],
82-
"random": [torch.randn(4, 5), torch.randn(4, 3), torch.randn(3, 5), 1.0, 1.0],
83-
"broadcast_bias_row": [
94+
"random": lambda: [
95+
torch.randn(4, 5),
96+
torch.randn(4, 3),
97+
torch.randn(3, 5),
98+
1.0,
99+
1.0,
100+
],
101+
"broadcast_bias_row": lambda: [
84102
torch.randn(1, 2),
85103
torch.randn(3, 4),
86104
torch.randn(4, 2),
87105
1.0,
88106
1.0,
89107
],
90-
"row_bias": [
108+
"row_bias": lambda: [
91109
torch.randn(3, 1),
92110
torch.randn(3, 4),
93111
torch.randn(4, 4),
94112
1.0,
95113
1.0,
96114
],
97-
"scalar_bias": [
115+
"scalar_bias": lambda: [
98116
torch.tensor(2.0),
99117
torch.randn(5, 3),
100118
torch.randn(3, 6),
@@ -120,7 +138,7 @@ def forward(
120138
def test_addmm_tosa_FP(test_data: Tuple):
121139
pipeline = TosaPipelineFP[input_t1](
122140
Addmm(),
123-
(*test_data,),
141+
(*test_data(),),
124142
aten_op=aten_op,
125143
exir_op=exir_op,
126144
)
@@ -131,7 +149,7 @@ def test_addmm_tosa_FP(test_data: Tuple):
131149
def test_addmm_tosa_INT(test_data: Tuple):
132150
pipeline = TosaPipelineINT[input_t1](
133151
Addmm(),
134-
(*test_data,),
152+
(*test_data(),),
135153
aten_op=[],
136154
exir_op=exir_op,
137155
)
@@ -143,7 +161,7 @@ def test_addmm_tosa_INT(test_data: Tuple):
143161
def test_addmm_u55_INT(test_data: Tuple):
144162
pipeline = EthosU55PipelineINT[input_t1](
145163
Addmm(),
146-
(*test_data,),
164+
(*test_data(),),
147165
aten_ops=[],
148166
exir_ops=exir_op,
149167
)
@@ -155,7 +173,7 @@ def test_addmm_u55_INT(test_data: Tuple):
155173
def test_addmm_u85_INT(test_data: Tuple):
156174
pipeline = EthosU85PipelineINT[input_t1](
157175
Addmm(),
158-
(*test_data,),
176+
(*test_data(),),
159177
aten_ops=[],
160178
exir_ops=exir_op,
161179
)
@@ -167,7 +185,7 @@ def test_addmm_u85_INT(test_data: Tuple):
167185
def test_addmm_vgf_no_quant(test_data: input_t1):
168186
pipeline = VgfPipeline[input_t1](
169187
Addmm(),
170-
(*test_data,),
188+
(*test_data(),),
171189
aten_op=aten_op,
172190
exir_op=exir_op,
173191
quantize=False,
@@ -180,7 +198,7 @@ def test_addmm_vgf_no_quant(test_data: input_t1):
180198
def test_addmm_vgf_quant(test_data: input_t1):
181199
pipeline = VgfPipeline[input_t1](
182200
Addmm(),
183-
(*test_data,),
201+
(*test_data(),),
184202
aten_op=[],
185203
exir_op=exir_op,
186204
quantize=True,
@@ -197,7 +215,7 @@ def test_addmm_16a8w_tosa_INT(test_data: input_t1):
197215

198216
pipeline = TosaPipelineINT[input_t1](
199217
Addmm(),
200-
(*test_data,),
218+
(*test_data(),),
201219
aten_op=[],
202220
exir_op=[],
203221
per_channel_quantization=per_channel_quantization,
@@ -223,7 +241,7 @@ def test_addmm_16a8w_u55_INT(test_data: input_t1):
223241

224242
pipeline = EthosU55PipelineINT[input_t1](
225243
Addmm(),
226-
(*test_data,),
244+
(*test_data(),),
227245
aten_ops=[],
228246
exir_ops=[],
229247
per_channel_quantization=per_channel_quantization,
@@ -245,7 +263,7 @@ def test_addmm_16a8w_u85_INT(test_data: input_t1):
245263

246264
pipeline = EthosU85PipelineINT[input_t1](
247265
Addmm(),
248-
(*test_data,),
266+
(*test_data(),),
249267
aten_ops=[],
250268
exir_ops=[],
251269
per_channel_quantization=per_channel_quantization,

backends/arm/test/ops/test_atan.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2025 Arm Limited and/or its affiliates.
1+
# Copyright 2025-2026 Arm Limited and/or its affiliates.
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
@@ -22,14 +22,14 @@
2222
input_t1 = Tuple[torch.Tensor]
2323

2424
test_data_suite = {
25-
"zeros": torch.zeros(1, 10, 10, 10),
26-
"zeros_alt_shape": torch.zeros(1, 10, 3, 5),
27-
"ones": torch.ones(10, 10, 10),
28-
"rand": torch.rand(10, 10) - 0.5,
29-
"rand_alt_shape": torch.rand(1, 10, 3, 5) - 0.5,
30-
"randn_pos": torch.randn(10) + 10,
31-
"randn_neg": torch.randn(10) - 10,
32-
"ramp": torch.arange(-16, 16, 0.2),
25+
"zeros": lambda: torch.zeros(1, 10, 10, 10),
26+
"zeros_alt_shape": lambda: torch.zeros(1, 10, 3, 5),
27+
"ones": lambda: torch.ones(10, 10, 10),
28+
"rand": lambda: torch.rand(10, 10) - 0.5,
29+
"rand_alt_shape": lambda: torch.rand(1, 10, 3, 5) - 0.5,
30+
"randn_pos": lambda: torch.randn(10) + 10,
31+
"randn_neg": lambda: torch.randn(10) - 10,
32+
"ramp": lambda: torch.arange(-16, 16, 0.2),
3333
}
3434

3535

@@ -43,7 +43,7 @@ def forward(self, x: torch.Tensor):
4343
def test_atan_tosa_FP(test_data: Tuple):
4444
pipeline = TosaPipelineFP[input_t1](
4545
Atan(),
46-
(test_data,),
46+
(test_data(),),
4747
aten_op=aten_op,
4848
exir_op=exir_op,
4949
)
@@ -54,7 +54,7 @@ def test_atan_tosa_FP(test_data: Tuple):
5454
def test_atan_tosa_INT(test_data: Tuple):
5555
pipeline = TosaPipelineINT[input_t1](
5656
Atan(),
57-
(test_data,),
57+
(test_data(),),
5858
aten_op=aten_op,
5959
exir_op=exir_op,
6060
)
@@ -66,7 +66,7 @@ def test_atan_tosa_INT(test_data: Tuple):
6666
def test_atan_u55_INT(test_data: Tuple):
6767
pipeline = EthosU55PipelineINT[input_t1](
6868
Atan(),
69-
(test_data,),
69+
(test_data(),),
7070
aten_ops=aten_op,
7171
exir_ops=exir_op,
7272
)
@@ -78,7 +78,7 @@ def test_atan_u55_INT(test_data: Tuple):
7878
def test_atan_u85_INT(test_data: Tuple):
7979
pipeline = EthosU85PipelineINT[input_t1](
8080
Atan(),
81-
(test_data,),
81+
(test_data(),),
8282
aten_ops=aten_op,
8383
exir_ops=exir_op,
8484
)
@@ -90,7 +90,7 @@ def test_atan_u85_INT(test_data: Tuple):
9090
def test_atan_vgf_no_quant(test_data: Tuple):
9191
pipeline = VgfPipeline[input_t1](
9292
Atan(),
93-
(test_data,),
93+
(test_data(),),
9494
aten_op,
9595
exir_op,
9696
quantize=False,
@@ -103,7 +103,7 @@ def test_atan_vgf_no_quant(test_data: Tuple):
103103
def test_atan_vgf_quant(test_data: Tuple):
104104
pipeline = VgfPipeline[input_t1](
105105
Atan(),
106-
(test_data,),
106+
(test_data(),),
107107
aten_op,
108108
exir_op,
109109
quantize=True,

backends/arm/test/ops/test_atanh.py

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,13 @@
2424

2525

2626
test_data_suite = {
27-
"zeros": torch.zeros(1, 10, 10, 10),
28-
"zeros_alt_shape": torch.zeros(1, 10, 3, 5),
29-
"rand": torch.rand(10, 10) - 0.5,
30-
"rand_alt_shape": torch.rand(1, 10, 3, 5) - 0.5,
31-
"ramp": torch.arange(-1, 1, 0.2),
32-
"near_bounds": torch.tensor([-0.99, -0.9, 0.9, 0.99]),
33-
"on_bounds": torch.tensor([-1.0, 1.0]),
27+
"zeros": lambda: torch.zeros(1, 10, 10, 10),
28+
"zeros_alt_shape": lambda: torch.zeros(1, 10, 3, 5),
29+
"rand": lambda: torch.rand(10, 10) - 0.5,
30+
"rand_alt_shape": lambda: torch.rand(1, 10, 3, 5) - 0.5,
31+
"ramp": lambda: torch.arange(-1, 1, 0.2),
32+
"near_bounds": lambda: torch.tensor([-0.99, -0.9, 0.9, 0.99]),
33+
"on_bounds": lambda: torch.tensor([-1.0, 1.0]),
3434
}
3535

3636

@@ -43,7 +43,7 @@ def forward(self, x: torch.Tensor):
4343
def test_atanh_tosa_FP(test_data: Tuple):
4444
pipeline = TosaPipelineFP[input_t1](
4545
Atanh(),
46-
(test_data,),
46+
(test_data(),),
4747
aten_op=aten_op,
4848
exir_op=exir_op,
4949
)
@@ -52,13 +52,14 @@ def test_atanh_tosa_FP(test_data: Tuple):
5252

5353
@common.parametrize("test_data", test_data_suite)
5454
def test_atanh_tosa_INT(test_data: Tuple):
55+
input_data = test_data()
5556
pipeline = TosaPipelineINT[input_t1](
5657
Atanh(),
57-
(test_data,),
58+
(input_data,),
5859
aten_op=aten_op,
5960
exir_op=exir_op,
6061
)
61-
if torch.any(test_data >= 1) or torch.any(test_data <= -1):
62+
if torch.any(input_data >= 1) or torch.any(input_data <= -1):
6263
# The quantized model will saturate to max/min values while the
6364
# original model will return inf/-inf, so comparison won't be valid here.
6465
pipeline.pop_stage("run_method_and_compare_outputs.original_model")
@@ -70,7 +71,7 @@ def test_atanh_tosa_INT(test_data: Tuple):
7071
def test_atanh_u55_INT(test_data: Tuple):
7172
pipeline = EthosU55PipelineINT[input_t1](
7273
Atanh(),
73-
(test_data,),
74+
(test_data(),),
7475
aten_ops=aten_op,
7576
exir_ops=exir_op,
7677
)
@@ -82,7 +83,7 @@ def test_atanh_u55_INT(test_data: Tuple):
8283
def test_atanh_u85_INT(test_data: Tuple):
8384
pipeline = EthosU85PipelineINT[input_t1](
8485
Atanh(),
85-
(test_data,),
86+
(test_data(),),
8687
aten_ops=aten_op,
8788
exir_ops=exir_op,
8889
)
@@ -94,7 +95,7 @@ def test_atanh_u85_INT(test_data: Tuple):
9495
def test_atanh_vgf_no_quant(test_data: input_t1):
9596
pipeline = VgfPipeline[input_t1](
9697
Atanh(),
97-
(test_data,),
98+
(test_data(),),
9899
aten_op=aten_op,
99100
exir_op=exir_op,
100101
quantize=False,
@@ -107,7 +108,7 @@ def test_atanh_vgf_no_quant(test_data: input_t1):
107108
def test_atanh_vgf_quant(test_data: input_t1):
108109
pipeline = VgfPipeline[input_t1](
109110
Atanh(),
110-
(test_data,),
111+
(test_data(),),
111112
aten_op=aten_op,
112113
exir_op=exir_op,
113114
quantize=True,

0 commit comments

Comments
 (0)