Skip to content

Commit ffc4705

Browse files
NXP backend: Add support for aten.leaky_relu.default. (#17305)
### Summary

Add support for `aten.leaky_relu.default` to the NXP backend.

### Test plan

Unit-tests provided. cc @robert-kalmar @JakeStevens @digantdesai
1 parent 7b843e4 commit ffc4705

8 files changed

Lines changed: 190 additions & 1 deletion

File tree

backends/nxp/backend/edge_program_converter.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
exir_ops.edge.aten.constant_pad_nd.default: ConstantPadNDConverter, # noqa F405
3838
exir_ops.edge.aten.convolution.default: ConvolutionConverter, # noqa F405
3939
exir_ops.edge.aten.hardtanh.default: HardTanhConverter, # noqa F405
40+
exir_ops.edge.aten.leaky_relu.default: LeakyReluConverter, # noqa F405
4041
exir_ops.edge.aten.max_pool2d.default: MaxPool2dConverter, # noqa F405
4142
exir_ops.edge.aten.mean.dim: MeanDimConverter, # noqa F405
4243
exir_ops.edge.aten.mm.default: MMConverter, # noqa F405

backends/nxp/backend/ir/converter/node_converters/ops_converters/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,9 @@
3131
from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.hardtanh_converter import (
3232
HardTanhConverter,
3333
)
34+
from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.leaky_relu_converter import (
35+
LeakyReluConverter,
36+
)
3437
from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.max_pool_2d_converter import (
3538
MaxPool2dConverter,
3639
)
@@ -99,6 +102,7 @@
99102
"ConstantPadNDConverter",
100103
"ConvolutionConverter",
101104
"HardTanhConverter",
105+
"LeakyReluConverter",
102106
"MaxPool2dConverter",
103107
"MeanDimConverter",
104108
"MMConverter",
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
# Copyright 2026 NXP
2+
#
3+
# This source code is licensed under the BSD-style license found in the
4+
# LICENSE file in the root directory of this source tree.
5+
6+
from executorch.backends.nxp.backend.ir.converter.node_converter import (
7+
CustomDelegationOptions,
8+
NodeConverter,
9+
)
10+
from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.leaky_relu_options import (
11+
LeakyRelu,
12+
)
13+
from torch.fx import Node
14+
from torch.nn import Parameter
15+
16+
17+
class LeakyReluConverter(NodeConverter):
    """Convert `aten.leaky_relu.default` nodes to the Neutron IR `LeakyRelu` operator."""

    @staticmethod
    def _is_supported_in_IR(
        node: Node,
        parameters_mapping: dict[str, Parameter],
        custom_delegation_options: CustomDelegationOptions,
    ) -> bool:
        # LeakyRelu has no IR-level restrictions — every node is convertible.
        return True

    def convert(self, node: Node):
        """Convert the `aten.leaky_relu.default` operator to Neutron IR `LeakyRelu`.

        The schema is:
            aten::leaky_relu(
                Tensor self,
                Scalar negative_slope=0.01
            ) -> Tensor
        """
        self.assert_convertible(node)

        # The slope may arrive positionally or as a keyword argument; the
        # aten schema default is 0.01.
        if len(node.args) > 1:
            alpha = node.args[1]
        else:
            alpha = node.kwargs.get("negative_slope", 0.01)

        t_op = self._create_tflite_op_with_io_tensors(node)
        t_op.builtin_options = LeakyRelu(alpha)

        self.builder.append_operators([t_op])

backends/nxp/neutron_partitioner.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,7 @@ def tag_qdq_clusters(self, nodes: list[torch.fx.Node]):
210210
exir_ops.edge.aten.constant_pad_nd.default: ConstantPadNDConverter, # noqa F405
211211
exir_ops.edge.aten.convolution.default: ConvolutionConverter, # noqa F405
212212
exir_ops.edge.aten.hardtanh.default: HardTanhConverter, # noqa F405
213+
exir_ops.edge.aten.leaky_relu.default: LeakyReluConverter, # noqa F405
213214
exir_ops.edge.aten.max_pool2d.default: MaxPool2dConverter, # noqa F405
214215
exir_ops.edge.aten.max_pool2d_with_indices.default: MaxPool2dConverter, # noqa F405
215216
exir_ops.edge.aten.mean.dim: MeanDimConverter, # noqa F405

backends/nxp/quantizer/neutron_quantizer.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@
2828
FlattenPattern,
2929
HardTanhInPlacePattern,
3030
HardTanhPattern,
31+
LeakyReluInPlacePattern,
32+
LeakyReluPattern,
3133
LinearPattern,
3234
MaxPoolPattern,
3335
MeanDimPattern,
@@ -262,6 +264,8 @@ def __init__(self, neutron_target_spec: NeutronTargetSpec, is_qat: bool = False)
262264
OpQuantizer(FlattenPattern(is_qat=is_qat), static_qconfig),
263265
OpQuantizer(HardTanhPattern(is_qat=is_qat), static_qconfig),
264266
OpQuantizer(HardTanhInPlacePattern(is_qat=is_qat), static_qconfig),
267+
OpQuantizer(LeakyReluPattern(is_qat=is_qat), static_fc_qconfig),
268+
OpQuantizer(LeakyReluInPlacePattern(is_qat=is_qat), static_fc_qconfig),
265269
OpQuantizer(LinearPattern(self, is_qat=is_qat), static_fc_qconfig),
266270
OpQuantizer(MaxPoolPattern(is_qat=is_qat), static_qconfig),
267271
OpQuantizer(MeanDimPattern(is_qat=is_qat), static_qconfig),

backends/nxp/quantizer/patterns.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@
2525
QuantizationSpec,
2626
SharedQuantizationSpec,
2727
)
28-
2928
from torchao.quantization.pt2e.quantizer.quantizer import Q_ANNOTATION_KEY
3029

3130

@@ -648,6 +647,20 @@ def replacement_op(self):
648647
raise AssertionError()
649648

650649

650+
class LeakyReluPattern(SingleInputBasicPattern):
    """Quantizer for the `aten.leaky_relu.default` operator."""

    def partition_types(self):
        # Match only the functional (out-of-place) variant.
        return [torch.ops.aten.leaky_relu.default]
655+
656+
657+
class LeakyReluInPlacePattern(SingleInputBasicPattern):
    """Quantizer for the `aten.leaky_relu.default` operator, with the parameter `inplace=True`."""

    def partition_types(self):
        # The trailing underscore op is the in-place variant.
        return [torch.ops.aten.leaky_relu_.default]
662+
663+
651664
class LinearPattern(QuantizationPattern):
652665
def __init__(self, neutron_quantizer, is_qat: bool = False):
653666
super().__init__(is_qat=is_qat)
Lines changed: 123 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,123 @@
1+
# Copyright 2026 NXP
2+
#
3+
# This source code is licensed under the BSD-style license found in the
4+
# LICENSE file in the root directory of this source tree.
5+
6+
import numpy as np
7+
import pytest
8+
import torch
9+
10+
from executorch.backends.nxp.backend.edge_program_converter import (
11+
EdgeProgramToIRConverter,
12+
)
13+
from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program
14+
from executorch.backends.nxp.tests.executors import (
15+
convert_run_compare,
16+
graph_contains_any_of_ops,
17+
)
18+
from executorch.exir.dialects._ops import ops as exir_ops
19+
20+
21+
@pytest.fixture(autouse=True)
def reseed_model_per_test_run():
    """Reseed the NumPy and PyTorch RNGs before each test, for reproducibility."""
    np.random.seed(23)
    torch.manual_seed(42)
25+
26+
27+
# Shorthands for the operators asserted on in the tests below.
ExecutorchDelegateCall = torch.ops.higher_order.executorch_call_delegate
LeakyRelu2D = exir_ops.edge.aten.leaky_relu.default
29+
30+
31+
def _assert_successful_delegation(model, input_shape, mocker, atol=0):
    """Quantize and convert `model`, assert the `leaky_relu` was delegated, and
    compare the intermediate edge program against the converted Neutron IR model.

    :param model: Module under test; expected to contain an `aten.leaky_relu` node.
    :param input_shape: Shape of the single model input.
    :param mocker: The `pytest-mock` fixture.
    :param atol: Absolute tolerance for the output comparison.
    """
    converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
    delegated_ep = to_quantized_edge_program(model, input_shape).exported_program()

    # The `leaky_relu` must have been absorbed into a delegate call.
    assert graph_contains_any_of_ops(delegated_ep.graph, [ExecutorchDelegateCall])
    assert not graph_contains_any_of_ops(delegated_ep.graph, [LeakyRelu2D])

    # Grab the program and model captured during conversion.
    intermediate_ep = converter_spy.call_args.args[1]
    neutron_ir_model, _ = converter_spy.spy_return

    # Random int8 data spanning the full [-128, 128) range.
    random_floats = np.random.random(input_shape).astype(np.float32)
    input_data = (random_floats * 256.0 - 128.0).astype(np.int8)

    # The intermediate program must still contain the `leaky_relu`.
    assert graph_contains_any_of_ops(intermediate_ep.graph, [LeakyRelu2D])

    convert_run_compare(
        intermediate_ep, tfl_model=neutron_ir_model, input_data=input_data, atol=atol
    )
53+
54+
55+
class LeakyReluModule(torch.nn.Module):
    """Minimal wrapper around `torch.nn.LeakyReLU` for the conversion tests."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Forward all arguments (e.g. `negative_slope`, `inplace`) to LeakyReLU.
        self.leaky_relu = torch.nn.LeakyReLU(*args, **kwargs)

    def forward(self, x):
        return self.leaky_relu(x)
63+
64+
65+
@pytest.mark.parametrize(
    "alpha",
    [
        0.01,  # Default value.
        0.1,
        3.14159,
        0.0,
        1.0,
    ],
    ids=lambda alpha: f"alpha = {alpha}",
)
def test_convert_leaky_relu__alpha(mocker, alpha):
    """Delegation must succeed for various `negative_slope` values."""
    model = LeakyReluModule(negative_slope=alpha)
    # atol=1 allows for the common quantization rounding error.
    _assert_successful_delegation(model, (23,), mocker, atol=1)
83+
84+
85+
def test_convert_leaky_relu__default_alpha(mocker):
    """Delegation must succeed when the default `negative_slope` (0.01) is used."""
    model = LeakyReluModule()  # Leave the default alpha.
    _assert_successful_delegation(model, (23,), mocker)
91+
92+
93+
@pytest.mark.parametrize(
    "inplace",
    [False, True],
    ids=lambda inplace: f"inplace = {inplace}",
)
def test_convert_leaky_relu__inplace(mocker, inplace):
    """Both the in-place and out-of-place variants must be delegated."""
    model = LeakyReluModule(inplace=inplace)
    _assert_successful_delegation(model, (23,), mocker)
104+
105+
106+
@pytest.mark.parametrize(
    "input_shape",
    [
        (5,),
        (4, 5),
        (3, 4, 5),
        (2, 3, 4, 5),
        (1, 2, 3, 4, 5),
    ],
    ids=lambda input_shape: f"{len(input_shape)}D",
)
def test_convert_leaky_relu__ranks(mocker, input_shape: tuple[int, ...]):
    """Delegation must succeed for 1D through 5D inputs."""
    # atol=1 allows for the common quantization rounding error.
    _assert_successful_delegation(LeakyReluModule(), input_shape, mocker, atol=1)

docs/source/backends/nxp/op-support.csv

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ aten.clone.default,int8,static int8,
1010
aten.constant_pad_nd.default,int8,static int8,"H or W padding only"
1111
aten.convolution.default,int8,static int8,"1D or 2D convolution, constant weights, groups=1 or groups=channels_count (depthwise)"
1212
aten.hardtanh.default,int8,static int8,"supported ranges: <0,6>, <-1, 1>, <0,1>, <0,inf>"
13+
aten.leaky_relu.default,int8,static int8,
1314
aten.max_pool2d.default,int8,static int8,"dilation=1, ceil_mode=False"
1415
aten.max_pool2d_with_indices.default,int8,static int8,"dilation=1, ceil_mode=False"
1516
aten.mean.dim,int8,static int8,"4D tensor only, dims = [-1,-2] or [-2,-1]"

0 commit comments

Comments (0)