Skip to content

Commit 99dcbb1

Browse files
Add a16w8 per-op test for conv1d (#19597)
Summary: Add int16 activation / int8 weight (a16w8) quantization tests for `aten.conv1d` on Ethos-U55 and Ethos-U85.

## Changes
- Add `a16w8_conv1d_test_parameters` dict with 14 test configurations (7 conv configs × {per_channel_quant=True, False}) covering kernel sizes 1/3/5, stride 1/2, dilation, depthwise, and no-bias variants
- Add `test_conv1d_a16w8_u55_INT` using `EthosU55PipelineINT` with `a16w8_quantization=True, symmetric_io_quantization=True, per_channel_quantization=<varied>, qtol=128, epsilon=2**-16`
- Add `test_conv1d_a16w8_u85_INT` using `EthosU85PipelineINT` with the same kwargs
- Register `ops/test_conv1d.py` in `fbcode/` and `xplat/` `targets.bzl`

bypass-pytorch-oss-checks

Differential Revision: D104532360
1 parent 066cd37 commit 99dcbb1

2 files changed

Lines changed: 74 additions & 0 deletions

File tree

backends/arm/test/ops/test_conv1d.py

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -399,3 +399,76 @@ def test_convolution_1d_vgf_quant_a8w4(test_data):
399399
get_symmetric_a8w4_quantization_config(is_per_channel=per_channel_quantization)
400400
)
401401
pipeline.run()
402+
403+
404+
# a16w8 (int16 activation, int8 weight) quantization test configurations.
# Each entry maps a human-readable test id to a zero-arg callable returning
# (fresh Conv1d model, per_channel_quantization flag).


def _a16w8_case(builder, per_channel):
    # Bind the builder and flag eagerly so every test-data callable
    # constructs a fresh model with the intended per-channel setting.
    return lambda: (builder(), per_channel)


_a16w8_conv1d_models = {
    "k1_1x2x128_st1": lambda: Conv1d(
        in_channels=2, out_channels=1, kernel_size=1,
        stride=1, padding=0, length=128, batches=1,
    ),
    "k3_1x3x64_st1_pd1": lambda: Conv1d(
        in_channels=3, out_channels=4, kernel_size=3,
        stride=1, padding=1, length=64, batches=1,
    ),
    "k5_1x2x64_st1_pd2": lambda: Conv1d(
        in_channels=2, out_channels=3, kernel_size=5,
        stride=1, padding=2, length=64, batches=1,
    ),
    "k3_1x3x32_st2_pd1": lambda: Conv1d(
        in_channels=3, out_channels=4, kernel_size=3,
        stride=2, padding=1, length=32, batches=1,
    ),
    "k3_1x3x32_st1_dl2": lambda: Conv1d(
        in_channels=3, out_channels=4, kernel_size=3,
        stride=1, padding=0, dilation=2, length=32, batches=1,
    ),
    "k3_1x4x32_st1_pd1_depthwise": lambda: Conv1d(
        in_channels=4, out_channels=4, kernel_size=3,
        stride=1, padding=1, groups=4, length=32, batches=1,
    ),
    "k3_1x3x64_st1_pd1_nobias": lambda: Conv1d(
        in_channels=3, out_channels=4, kernel_size=3,
        stride=1, padding=1, bias=False, length=64, batches=1,
    ),
}

a16w8_conv1d_test_parameters = {
    f"{name},per_channel_quant={per_channel}": _a16w8_case(builder, per_channel)
    for name, builder in _a16w8_conv1d_models.items()
    for per_channel in [True, False]
}
439+
440+
441+
@common.parametrize("test_data", a16w8_conv1d_test_parameters)
@common.XfailIfNoCorstone300
def test_conv1d_a16w8_u55_INT(test_data):
    """Run conv1d with int16 activations / int8 weights on Ethos-U55.

    The relaxed qtol/epsilon account for the wider int16 activation range.
    """
    model, per_channel_quantization = test_data()
    quant_kwargs = dict(
        a16w8_quantization=True,
        symmetric_io_quantization=True,
        per_channel_quantization=per_channel_quantization,
        qtol=128,
        epsilon=2**-16,
    )
    EthosU55PipelineINT[input_t](
        model,
        model.get_inputs(),
        aten_op,
        exir_op,
        **quant_kwargs,
    ).run()
457+
458+
459+
@common.parametrize("test_data", a16w8_conv1d_test_parameters)
@common.XfailIfNoCorstone320
def test_conv1d_a16w8_u85_INT(test_data):
    """Run conv1d with int16 activations / int8 weights on Ethos-U85.

    The relaxed qtol/epsilon account for the wider int16 activation range.
    """
    model, per_channel_quantization = test_data()
    quant_kwargs = dict(
        a16w8_quantization=True,
        symmetric_io_quantization=True,
        per_channel_quantization=per_channel_quantization,
        qtol=128,
        epsilon=2**-16,
    )
    EthosU85PipelineINT[input_t](
        model,
        model.get_inputs(),
        aten_op,
        exir_op,
        **quant_kwargs,
    ).run()

backends/arm/test/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ def define_arm_tests():
4040
"ops/test_reciprocal.py",
4141
"ops/test_mean_dim.py",
4242
"ops/test_var.py",
43+
"ops/test_conv1d.py",
4344
]
4445

4546
# Quantization

0 commit comments

Comments
 (0)