Skip to content

Commit 520566c

Browse files
authored
feat: added checks for support of activations on target (#18102)
### Summary Introduces stricter checks for activation support on the target. The updated validation logic applies to: `clamp`, `hardtanh`, `leaky_relu`, `relu`, `sigmoid`, `tanh`. edit after rework: Introduces stricter checks for support in cases where a `relu` or `relu6` operator might be alone in a partition. In such cases, the Neutron converter fails to delegate the operator. Also fixed a small issue in `hardtanh_converter` related to reading out the arguments from the edge operator. This additional check applies for `relu_converter`, `clamp_converter` and `hardtanh_converter`. Also checked whether the issue affects other activations, such as `leaky_relu`, `tanh` and `sigmoid`, but empirically the issue did not occur there. ### Test plan tests can be manually run using `pytest -c /dev/null backends/nxp/tests/` cc @robert-kalmar @JakeStevens @digantdesai @MartinPavella @roman-janik-nxp
1 parent 763cdd1 commit 520566c

9 files changed

Lines changed: 287 additions & 49 deletions

File tree

backends/nxp/backend/ir/converter/node_converter.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55

66
import operator
77
from abc import ABC, abstractmethod
8+
from typing import Callable
89

910
import torch
1011

@@ -186,6 +187,35 @@ def _has_shared_q_params_if_quantized(node: Node) -> bool:
186187
# Node not quantized
187188
return True
188189

190+
@staticmethod
191+
def is_node_alone_in_partition(
192+
node: Node, partition_list: list[Partition], filter_fn: Callable[[Node], bool]
193+
) -> bool:
194+
"""Return True if `node` is the only node in its partition for which `filter_fn`
195+
returns True.
196+
197+
The function finds the unique partition containing `node` and applies
198+
`filter_fn` to all nodes in that partition. If only one node passes the
199+
predicate — and that node is `node` — the function returns True.
200+
201+
:param node: The torch.fx.Node to check.
202+
:param partition_list: List of proposed partitions.
203+
:param filter_fn: Predicate applied to nodes in the partition.
204+
`node` is considered alone if it is the only node
205+
for which this predicate returns True.
206+
"""
207+
partitions = [p for p in partition_list if node in p.nodes]
208+
if len(partitions) != 1:
209+
raise ValueError(
210+
"Cannot find a partition of a node in graph. This should not occur."
211+
)
212+
213+
partition = partitions[0]
214+
filtered_partition_nodes = list(filter(filter_fn, partition.nodes))
215+
return (
216+
len(filtered_partition_nodes) == 1 and filtered_partition_nodes[0] == node
217+
)
218+
189219
def assert_convertible(self, node):
190220
"""Assert that the call `is_supported()` returns `True`. Otherwise, raise an exception and print an
191221
error message.

backends/nxp/backend/ir/converter/node_converters/ops_converters/clamp_converter.py

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,9 @@
1212
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
1313
BuiltinOperator,
1414
)
15+
from executorch.backends.nxp.backend.neutron_operator_support import (
16+
activation_supported_on_target,
17+
)
1518
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
1619
from torch.fx import Node
1720
from torch.fx.passes.infra.partitioner import Partition
@@ -76,19 +79,15 @@ def supports_partitioning_result(
7679
) -> bool:
7780
bounds = cls._get_clamp_bounds(node)
7881

82+
# Neutron cannot delegate a partition where ReLU or ReLU6 is the only operator
83+
# and at the same time the node does not satisfy delegation requirements.
84+
# In contrast, ReLUN1To1 and ReLU0To1 are supported and delegated successfully.
7985
if bounds in [cls.SUPPORTED_BOUNDS["Relu"], cls.SUPPORTED_BOUNDS["Relu6"]]:
80-
# If this is the only operator in the partition, NeutronConverter will not create a NeutronNode for some
81-
# reason.
82-
clamp_partitions = [p for p in partition_list if node in p.nodes]
83-
if len(clamp_partitions) != 1:
84-
return False # Should never happen
85-
86-
clamp_partition = clamp_partitions[0]
87-
non_q_dq_partition_nodes = list(
88-
filter(is_not_qdq_node, clamp_partition.nodes)
86+
is_alone_in_partition = cls.is_node_alone_in_partition(
87+
node, partition_list, filter_fn=is_not_qdq_node
8988
)
90-
if len(non_q_dq_partition_nodes) <= 1:
91-
return False # This would be the only node in the partition, which would cause a crash later on.
89+
if is_alone_in_partition:
90+
return activation_supported_on_target(node, neutron_target_spec)
9291

9392
return True
9493

Lines changed: 72 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,47 +1,112 @@
1-
# Copyright 2025 NXP
1+
# Copyright 2025-2026 NXP
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
55

66
from executorch.backends.nxp.backend.ir.converter.node_converter import (
77
CustomDelegationOptions,
8+
is_not_qdq_node,
89
NodeConverter,
10+
Partition,
911
)
1012
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
1113
BuiltinOperator,
1214
)
15+
from executorch.backends.nxp.backend.neutron_operator_support import (
16+
activation_supported_on_target,
17+
)
18+
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
1319
from torch.fx import Node
1420
from torch.nn import Parameter
1521

1622

1723
class HardTanhConverter(NodeConverter):
1824

1925
# Maps possible input parameters of HardTanh to equivalent ReLU-based operators supported by TFLite.
20-
supported_modes_map = {
26+
SUPPORTED_MODES_MAP = {
2127
(0.0, 6.0): BuiltinOperator.RELU6,
2228
(-1.0, 1.0): BuiltinOperator.RELU_N1_TO_1,
2329
(0.0, 1.0): BuiltinOperator.RELU_0_TO_1,
2430
(0.0, float("inf")): BuiltinOperator.RELU,
2531
}
2632

33+
# Maps possible modes of HardTanh to equivalent ReLU bounds.
34+
SUPPORTED_BOUNDS_MAP = {
35+
"ReluN1To1": (-1.0, 1.0),
36+
"Relu0To1": (0.0, 1.0),
37+
"Relu6": (0.0, 6.0),
38+
"Relu": (0.0, float("inf")),
39+
}
40+
41+
@staticmethod
42+
def _get_hardtanh_bounds(node: Node) -> tuple[float, float]:
43+
args = node.args
44+
45+
match len(args):
46+
case 1:
47+
min_val = -1
48+
max_val = 1
49+
50+
case 2:
51+
min_val = args[1]
52+
max_val = 1
53+
54+
case 3:
55+
min_val = args[1]
56+
max_val = args[2]
57+
58+
case _:
59+
# should not occur
60+
raise ValueError(
61+
f"Unexpected number of arguments for HardTanh node: {len(args)}"
62+
)
63+
64+
return min_val, max_val
65+
2766
@staticmethod
2867
def _is_supported_in_IR(
2968
node: Node,
3069
parameters_mapping: dict[str, Parameter],
3170
custom_delegation_options: CustomDelegationOptions,
3271
) -> bool:
33-
_, min_value, max_value = node.args
34-
return (min_value, max_value) in HardTanhConverter.supported_modes_map.keys()
72+
bounds = HardTanhConverter._get_hardtanh_bounds(node)
73+
return bounds in HardTanhConverter.SUPPORTED_MODES_MAP
74+
75+
@classmethod
76+
def supports_partitioning_result(
77+
cls,
78+
node: Node,
79+
partition_list: list[Partition],
80+
custom_delegation_options: CustomDelegationOptions,
81+
neutron_target_spec: NeutronTargetSpec,
82+
parameters_mapping: dict[str, Parameter],
83+
) -> bool:
84+
bounds = HardTanhConverter._get_hardtanh_bounds(node)
85+
86+
# Neutron cannot delegate a partition where ReLU or ReLU6 is the only operator
87+
# and at the same time the node does not satisfy delegation requirements.
88+
# In contrast, ReLUN1To1 and ReLU0To1 are supported and delegated successfully.
89+
if bounds in [
90+
cls.SUPPORTED_BOUNDS_MAP["Relu"],
91+
cls.SUPPORTED_BOUNDS_MAP["Relu6"],
92+
]:
93+
is_alone_in_partition = cls.is_node_alone_in_partition(
94+
node, partition_list, filter_fn=is_not_qdq_node
95+
)
96+
if is_alone_in_partition:
97+
return activation_supported_on_target(node, neutron_target_spec)
98+
99+
return True
35100

36101
def convert(self, node: Node):
37-
"""Convert 'aten::hardtanh' to it's supported ReLU equivalent."""
102+
"""Convert 'aten::hardtanh' to its supported ReLU equivalent."""
38103
self.assert_convertible(node)
39104

40105
t_op = self._create_tflite_op_with_io_tensors(node)
41106

42-
_, min_value, max_value = node.args
107+
bounds = HardTanhConverter._get_hardtanh_bounds(node)
43108

44-
op = self.supported_modes_map[(min_value, max_value)]
109+
op = self.SUPPORTED_MODES_MAP[bounds]
45110
t_op.opcode_index = self.builder.op_code_index_for_op_type(op)
46111

47112
self.builder.append_operators([t_op])

backends/nxp/backend/ir/converter/node_converters/ops_converters/relu_converter.py

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,21 @@
1-
# Copyright 2024-2025 NXP
1+
# Copyright 2024-2026 NXP
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
55

66
from executorch.backends.nxp.backend.ir.converter.node_converter import (
77
CustomDelegationOptions,
8+
is_not_qdq_node,
89
NodeConverter,
10+
Partition,
911
)
1012
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
1113
BuiltinOperator,
1214
)
15+
from executorch.backends.nxp.backend.neutron_operator_support import (
16+
activation_supported_on_target,
17+
NeutronTargetSpec,
18+
)
1319
from torch.fx import Node
1420
from torch.nn import Parameter
1521

@@ -24,6 +30,23 @@ def _is_supported_in_IR(
2430
) -> bool:
2531
return True
2632

33+
@classmethod
34+
def supports_partitioning_result(
35+
cls,
36+
node: Node,
37+
partition_list: list[Partition],
38+
custom_delegation_options: CustomDelegationOptions,
39+
neutron_target_spec: NeutronTargetSpec,
40+
parameters_mapping: dict[str, Parameter],
41+
) -> bool:
42+
is_alone_in_partition = cls.is_node_alone_in_partition(
43+
node, partition_list, filter_fn=is_not_qdq_node
44+
)
45+
if is_alone_in_partition:
46+
return activation_supported_on_target(node, neutron_target_spec)
47+
48+
return True
49+
2750
def convert(self, node: Node):
2851
self.assert_convertible(node)
2952

backends/nxp/backend/neutron_operator_support.py

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,15 @@
1-
# Copyright 2025 NXP
1+
# Copyright 2025-2026 NXP
22
#
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
55

6+
from executorch.backends.nxp.backend.data_format import NXP_NODE_FORMAT
7+
from executorch.backends.nxp.backend.edge_helper import input_tensor
8+
from executorch.backends.nxp.backend.ir.converter.conversion.translator import (
9+
dims_to_channels_last,
10+
)
611
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
12+
from torch.fx import Node
713

814

915
def is_tensor_invariant_permutation(
@@ -77,3 +83,23 @@ def transposition_is_supported_on_neutron(
7783
return True
7884

7985
return False
86+
87+
88+
def activation_supported_on_target(
89+
node: Node, neutron_target_spec: NeutronTargetSpec
90+
) -> bool:
91+
"""This function determines if the current NeutronSoftware properly supports an activation operator represented by the given node.
92+
93+
:param node: The node representing the activation operator.
94+
:param neutron_target_spec: Object for querying the target platform to retrieve its properties.
95+
"""
96+
input_shape = list(input_tensor(node, 0).shape)
97+
if node.args[0].meta[NXP_NODE_FORMAT].is_channels_first():
98+
input_shape = dims_to_channels_last(input_shape)
99+
100+
c = input_shape[-1]
101+
num_macs = neutron_target_spec.get_num_macs()
102+
103+
# activations in Neutron are delegable only
104+
# if `num_channels` % `num_macs` == 0
105+
return c % num_macs == 0

backends/nxp/tests/ir/converter/node_converter/test_clamp_converter.py

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -100,21 +100,28 @@ def test_convert_clamp__supported(mocker, min, max):
100100

101101
# noinspection PyShadowingBuiltins
102102
@pytest.mark.parametrize(
103-
"min, max",
103+
"input_shape, min, max",
104104
[
105-
pytest.param(0, 6, id="min = 0, max = 6 (Relu6)"),
106-
pytest.param(0, None, id="min = 0, max = None (Relu)"),
105+
pytest.param(
106+
(1, 7, 9, 11),
107+
0,
108+
6,
109+
id="min = 0, max = 6 (Relu6), num_channels not divisible by NUM_MACS, alone in partition",
110+
),
111+
pytest.param(
112+
(1, 7, 9, 11),
113+
0,
114+
None,
115+
id="min = 0, max = None (Relu), num_channels not divisible by NUM_MACS, alone in partition",
116+
),
107117
],
108118
)
109-
def test_convert_clamp__single_op__not_delegated_variants(min, max):
110-
# Test that Clamp representable as Relu6 or Relu is NOT delegated, because it is a single op model which is not
111-
# supported by Neutron.
112-
input_shape = (23,)
119+
def test_convert_clamp__unsupported_shape(input_shape, min, max):
113120
model = ClampModule(min, max)
114121

115122
delegated_ep = to_quantized_edge_program(model, input_shape).exported_program()
116123

117-
# Make sure the `clamp` was NOT delegated (single op model).
124+
# Make sure the `clamp` was NOT delegated.
118125
assert not graph_contains_any_of_ops(delegated_ep.graph, [ExecutorchDelegateCall])
119126
assert graph_contains_any_of_ops(delegated_ep.graph, [Clamp])
120127

0 commit comments

Comments
 (0)