Skip to content

Commit 28bb54c

Browse files
committed
NXP backend: Enable constant_pad_nd with new Neutron flow.
1 parent 5971a4b commit 28bb54c

4 files changed

Lines changed: 162 additions & 29 deletions

File tree

backends/nxp/backend/ir/converter/node_converters/ops_converters/constant_pad_nd_converter.py

Lines changed: 26 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -41,19 +41,25 @@ def _is_supported_on_target(
4141
parameters_mapping: dict[str, Parameter],
4242
custom_delegation_options: CustomDelegationOptions,
4343
) -> bool:
44-
paddings = node.args[1]
45-
if node.meta[NXP_NODE_FORMAT].is_channels_first():
46-
# Dim `1` will end up being the channels. It is padded by paddings[4:6].
47-
if len(paddings) > 4 and paddings[4:6] != [0, 0]:
48-
# Attempt to Pad channels dimension -> currently not supported
49-
return False
50-
else:
51-
# Dim `-1` will end up being the channels. It is padded by paddings[:2].
52-
if len(paddings) > 0 and paddings[:2] != [0, 0]:
53-
# Attempt to Pad channels dimension -> currently not supported
54-
return False
44+
if custom_delegation_options.use_new_flow_neutron_c:
45+
# Requirements specified by the new Neutron flow documentation.
5546

56-
return True
47+
return True # There are no requirements.
48+
49+
else:
50+
paddings = node.args[1]
51+
if node.meta[NXP_NODE_FORMAT].is_channels_first():
52+
# Dim `1` will end up being the channels. It is padded by paddings[4:6].
53+
if len(paddings) > 4 and paddings[4:6] != [0, 0]:
54+
# Attempt to Pad channels dimension -> currently not supported
55+
return False
56+
else:
57+
# Dim `-1` will end up being the channels. It is padded by paddings[:2].
58+
if len(paddings) > 0 and paddings[:2] != [0, 0]:
59+
# Attempt to Pad channels dimension -> currently not supported
60+
return False
61+
62+
return True
5763

5864
@staticmethod
5965
def _is_supported_in_IR(
@@ -110,7 +116,14 @@ def _convert_paddings_to_tflite(
110116
return paddings
111117

112118
def convert(self, node: Node):
113-
"""Convert the `aten.constant_pad_nd` operator to TFLite `PadV2`."""
119+
"""Convert the `aten.constant_pad_nd` operator to NeutronIR `PadV2`.
120+
The ExecuTorch schema is:
121+
constant_pad_nd(
122+
Tensor self,
123+
SymInt[] pad,
124+
Scalar value=0
125+
) -> Tensor
126+
"""
114127
self.assert_convertible(node)
115128

116129
t_op = self._create_tflite_op_with_io_tensors(node)

backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py

Lines changed: 128 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,15 @@
44
# LICENSE file in the root directory of this source tree.
55

66
import numpy as np
7+
8+
# noinspection PyUnusedImports
79
import pytest
810
import torch
11+
912
from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig
13+
from executorch.backends.nxp.backend.ir.converter.builder.model_builder import (
14+
ModelBuilder,
15+
)
1016
from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.constant_pad_nd_converter import (
1117
ConstantPadNDConverter,
1218
)
@@ -17,16 +23,18 @@
1723
from executorch.backends.nxp.tests.executors import (
1824
convert_run_compare,
1925
graph_contains_any_of_ops,
26+
OverrideTargetSupportCheck,
2027
ToNCHWPreprocess,
2128
ToNHWCPreprocess,
2229
)
30+
from executorch.backends.nxp.tests.graph_verifier import DetailedGraphVerifier
2331
from executorch.backends.nxp.tests.models import (
2432
ConstantPadNDConvModule,
2533
ConstantPadNDModule,
2634
)
35+
from executorch.backends.nxp.tests.nsys_testing import lower_run_compare
36+
from executorch.backends.nxp.tests.ops_aliases import ConstantPadND, Convolution
2737
from executorch.backends.nxp.tests.use_qat import * # noqa F403
28-
from executorch.backends.nxp.tests.executors import OverrideTargetSupportCheck
29-
from executorch.exir.dialects._ops import ops as exir_ops
3038

3139

3240
@pytest.fixture(autouse=True)
@@ -158,9 +166,8 @@ def test_constant_pad_nd__unsupported_paddings(input_shape, paddings, use_qat):
158166
model, input_shape, use_qat=use_qat
159167
).exported_program()
160168

161-
nodes = list(exec_program.graph.nodes)
162169
# There is at least one non-delegated Pad node
163-
assert any(node.name == "aten_constant_pad_nd_default" for node in nodes)
170+
assert graph_contains_any_of_ops(exec_program.graph, [ConstantPadND])
164171

165172

166173
@pytest.mark.xfail(reason="EIEX=855")
@@ -173,9 +180,7 @@ def test_constant_pad_nd__delegation__formatless__supported_padding(use_qat):
173180
).exported_program()
174181

175182
# Make sure the `pad` was delegated.
176-
assert not graph_contains_any_of_ops(
177-
exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
178-
)
183+
assert not graph_contains_any_of_ops(exec_program.graph, [ConstantPadND])
179184

180185

181186
def test_constant_pad_nd__delegation__formatless__unsupported_padding(use_qat):
@@ -187,9 +192,7 @@ def test_constant_pad_nd__delegation__formatless__unsupported_padding(use_qat):
187192
).exported_program()
188193

189194
# Make sure the `pad` was NOT delegated.
190-
assert graph_contains_any_of_ops(
191-
exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
192-
)
195+
assert graph_contains_any_of_ops(exec_program.graph, [ConstantPadND])
193196

194197

195198
@pytest.mark.xfail(reason="Regression in Neutron SW 3.0.1 (AIR-14264)", strict=True)
@@ -202,9 +205,7 @@ def test_constant_pad_nd__delegation__channels_first__supported_padding(use_qat)
202205
).exported_program()
203206

204207
# Make sure the `pad` was delegated.
205-
assert not graph_contains_any_of_ops(
206-
exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
207-
)
208+
assert not graph_contains_any_of_ops(exec_program.graph, [ConstantPadND])
208209

209210

210211
def test_constant_pad_nd__delegation__channels_first__unsupported_padding(use_qat):
@@ -216,6 +217,118 @@ def test_constant_pad_nd__delegation__channels_first__unsupported_padding(use_qa
216217
).exported_program()
217218

218219
# Make sure the `pad` was NOT delegated.
219-
assert graph_contains_any_of_ops(
220-
exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
220+
assert graph_contains_any_of_ops(exec_program.graph, [ConstantPadND])
221+
222+
223+
class TestConstantPadNDNewNeutronFlow:
224+
# noinspection PyMethodMayBeStatic
225+
def assert_delegated(self, model, input_shape, mocker, use_qat=False):
226+
graph_verifier = DetailedGraphVerifier(
227+
mocker,
228+
expected_delegated_ops={ConstantPadND: 1},
229+
expected_non_delegated_ops={},
230+
)
231+
232+
lower_run_compare(
233+
model,
234+
input_shape,
235+
graph_verifier,
236+
use_qat=use_qat,
237+
use_new_flow_neutron_c=True,
238+
)
239+
240+
def assert_delegated_and_output_shape_equals(
241+
self, model, input_shape, expected_output_shape, mocker
242+
):
243+
model_builder_spy = mocker.spy(ModelBuilder, "finish")
244+
245+
self.assert_delegated(model, input_shape, mocker)
246+
247+
neutron_ir_subgraph = model_builder_spy.call_args[0][0].get_sub_graph()
248+
assert neutron_ir_subgraph.outputs.tmp_outputs[0].shape.vector == list(
249+
expected_output_shape
250+
)
251+
252+
@pytest.mark.parametrize(
253+
"input_shape, paddings",
254+
[
255+
pytest.param((2,), tuple(range(2)), id="1D, padding H"),
256+
pytest.param((2, 4), tuple(range(2)), id="2D, padding H"),
257+
pytest.param((2, 4), tuple(range(4)), id="2D, padding N, H"),
258+
pytest.param((2, 4, 6), tuple(range(2)), id="3D, padding H"),
259+
pytest.param((2, 4, 6), tuple(range(4)), id="3D, padding C, H"),
260+
pytest.param((2, 4, 6, 8), tuple(range(2)), id="4D, padding W"),
261+
pytest.param((2, 4, 6, 8), tuple(range(4)), id="4D, padding H, W"),
262+
pytest.param((1, 2, 3, 4, 5), tuple(range(2)), id="5D, padding D"),
263+
pytest.param((1, 2, 3, 4, 5), tuple(range(4)), id="5D, padding W, D"),
264+
],
265+
)
266+
def test__basic_nsys_inference(self, mocker, input_shape, paddings, use_qat):
267+
# These test cases are also supported by the old flow.
268+
model = ConstantPadNDModule(paddings)
269+
self.assert_delegated(model, input_shape, mocker, use_qat)
270+
271+
def test__channels_padding(self, mocker):
272+
input_shape = (2, 4, 6)
273+
# These paddings will be applied to the last dimension, which is the channels as the input is formatless.
274+
paddings = (1, 1)
275+
expected_output_shape = (2, 4, 8) # Padded channels.
276+
model = ConstantPadNDModule(paddings)
277+
278+
self.assert_delegated_and_output_shape_equals(
279+
model, input_shape, expected_output_shape, mocker
280+
)
281+
282+
def test__batch_padding(self, mocker):
283+
input_shape = (2, 4, 6)
284+
paddings = (0, 0, 0, 0, 1, 1) # Padding applied to the batch dimension.
285+
expected_output_shape = (4, 4, 6) # Padded batch.
286+
model = ConstantPadNDModule(paddings)
287+
288+
self.assert_delegated_and_output_shape_equals(
289+
model, input_shape, expected_output_shape, mocker
290+
)
291+
292+
@pytest.mark.parametrize("constant", [0.0, -13.37])
293+
def test__specific_constant(self, mocker, constant):
294+
input_shape = (2, 4, 6)
295+
paddings = (1, 1)
296+
model = ConstantPadNDModule(paddings, constant)
297+
self.assert_delegated(model, input_shape, mocker)
298+
299+
@pytest.mark.parametrize(
300+
"input_shape, paddings",
301+
[
302+
pytest.param((1, 4, 6, 8), tuple(range(2)), id="4D, padding W"),
303+
pytest.param((1, 4, 6, 8), tuple(range(4)), id="4D, padding H, W"),
304+
],
221305
)
306+
def test__channels_first(self, mocker, input_shape, paddings):
307+
model = ConstantPadNDConvModule(paddings)
308+
graph_verifier = DetailedGraphVerifier(
309+
mocker,
310+
expected_delegated_ops={ConstantPadND: 1, Convolution: 1},
311+
expected_non_delegated_ops={},
312+
)
313+
314+
lower_run_compare(
315+
model, input_shape, graph_verifier, use_new_flow_neutron_c=True
316+
)
317+
318+
@pytest.mark.xfail(
319+
strict=True,
320+
raises=RuntimeError,
321+
reason="Known issue in Neutron: https://jira.sw.nxp.com/browse/AIR-14624", # @lint-ignore
322+
)
323+
def test__bugged_channels_first_case(self, mocker):
324+
input_shape, paddings = (1, 2, 6, 8), (0, 1, 2, 3, 1, 1)
325+
model = ConstantPadNDConvModule(paddings)
326+
graph_verifier = DetailedGraphVerifier(
327+
mocker,
328+
expected_delegated_ops={ConstantPadND: 1, Convolution: 1},
329+
expected_non_delegated_ops={},
330+
)
331+
332+
lower_run_compare(
333+
model, input_shape, graph_verifier, use_new_flow_neutron_c=True
334+
)

backends/nxp/tests/model_output_comparator.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,13 @@ def compare_sample(self, sample_dir, cpu_output_tensors, npu_output_tensors):
9191
assert np.any(
9292
cpu_tensor
9393
), "Output tensor contains only zeros. This is suspicious."
94-
assert np.allclose(cpu_tensor, npu_tensor, atol=self.atol)
94+
all_close = np.allclose(cpu_tensor, npu_tensor, atol=self.atol)
95+
if not all_close:
96+
max_diff = np.abs(cpu_tensor - npu_tensor).max()
97+
print(
98+
f"NPU output doesn't match reference. Maximum absolute difference: {max_diff}"
99+
)
100+
assert all_close
95101

96102

97103
def _default_postprocess_fn(outputs: np.ndarray, _: str):

backends/nxp/tests/ops_aliases.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
Abs = exir_ops.edge.aten.abs.default
1515
AvgPool2D = exir_ops.edge.aten.avg_pool2d.default
1616
Bmm = exir_ops.edge.aten.bmm.default
17+
ConstantPadND = exir_ops.edge.aten.constant_pad_nd.default
1718
Convolution = exir_ops.edge.aten.convolution.default
1819
DequantizePerChannel = exir_ops.edge.quantized_decomposed.dequantize_per_channel.default
1920
DequantizePerTensor = exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default

0 commit comments

Comments (0)