Skip to content

Commit eb19f24

Browse files
NXP backend: Unify testing infrastructure. (pytorch#19112)
### Summary

Unify infrastructure for tests comparing edge dialect <-> Neutron IR with tests comparing lowered edge programs <-> lowered Neutron delegated programs.

### Test plan

Tested by all existing tests.

cc @robert-kalmar @JakeStevens @digantdesai
1 parent 4ac044b commit eb19f24

47 files changed

Lines changed: 271 additions & 384 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.github/workflows/pull.yml

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1235,26 +1235,26 @@ jobs:
12351235
12361236
# Build and install Executorch
12371237
PYTHON_EXECUTABLE=python \
1238-
CMAKE_ARGS="-DEXECUTORCH_BUILD_NXP_NEUTRON=ON -DEXECUTORCH_BUILD_NXP_NEUTRON_RUNNER=ON " \
1239-
.ci/scripts/setup-linux.sh --build-tool "cmake"
1238+
CMAKE_ARGS="-DEXECUTORCH_BUILD_NXP_NEUTRON=ON -DEXECUTORCH_BUILD_NXP_NEUTRON_RUNNER=ON \
1239+
-DEXECUTORCH_BUILD_KERNELS_PORTABLE=ON -DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
1240+
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \
1241+
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON -DEXECUTORCH_BUILD_EXTENSION_NAMED_DATA_MAP=ON" \
1242+
.ci/scripts/setup-linux.sh --build-tool "cmake" --editable true
12401243
12411244
# Install test requirements
12421245
pip install -r backends/nxp/requirements-tests-pypi.txt
12431246
PYTHON_EXECUTABLE=python bash examples/nxp/setup.sh
12441247
1245-
# Run pytest
1246-
PYTHON_EXECUTABLE=python bash backends/nxp/run_unittests.sh
1247-
12481248
# Run aot examples:
12491249
PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh cifar10
12501250
PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh mobilenetv2
12511251
12521252
# Run e2e example with Simulator:
12531253
PYTHON_EXECUTABLE=python bash examples/nxp/run.sh cifar10
12541254
1255-
# Run lightweight model tests:
1256-
PYTHON_EXECUTABLE=python pytest -c /dev/null backends/nxp/tests_models/ \
1257-
--nxp_runner_path "./examples/nxp/executor_runner/build/nxp_executor_runner"
1255+
# Run unit tests:
1256+
PYTHON_EXECUTABLE=python NXP_RUNNER_PATH="./examples/nxp/executor_runner/build/nxp_executor_runner" \
1257+
bash backends/nxp/run_unittests.sh
12581258
12591259
test-samsung-quantmodels-linux:
12601260
name: test-samsung-quantmodels-linux

backends/nxp/__init__.py

Whitespace-only changes.

backends/nxp/backend/ir/converter/conversion/translator.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# Copyright 2023 Martin Pavella
2-
# Copyright 2023-2025 NXP
2+
# Copyright 2023-2026 NXP
33
#
44
# License: MIT
55
# See the LICENSE_MIT for more details.
@@ -547,8 +547,8 @@ def convert_data_type(torch_type: torch.TensorType) -> TensorType:
547547
)
548548

549549

550-
def torch_type_to_numpy_type(torch_type: torch.TensorType) -> np.ScalarType:
551-
"""Convert Torch DataType to NeutronIR TensorType"""
550+
def torch_type_to_numpy_type(torch_type: torch.TensorType) -> np.dtype:
551+
"""Convert Torch data type to Numpy data type."""
552552

553553
if torch_type == torch.float32:
554554
return np.dtype(np.float32)

backends/nxp/quantizer/utils.py

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# Copyright (c) Meta Platforms, Inc. and affiliates.
2-
# Copyright 2024-2025 NXP
2+
# Copyright 2024-2026 NXP
33
# All rights reserved.
44
#
55
# This source code is licensed under the BSD-style license found in the
@@ -10,7 +10,7 @@
1010
import itertools
1111
from collections import OrderedDict
1212
from collections.abc import Iterable
13-
from typing import Any, Dict, List, Tuple, Type
13+
from typing import Any, Callable, Dict, List, Tuple, Type
1414

1515
import torch
1616
from executorch.backends.nxp.aten_passes.fuse_batch_norm_with_linear_pass import (
@@ -30,8 +30,10 @@
3030
check_subgraphs_connected,
3131
SourcePartition,
3232
)
33+
3334
from torchao.quantization.pt2e import (
3435
move_exported_model_to_eval,
36+
move_exported_model_to_train,
3537
ObserverOrFakeQuantize,
3638
)
3739
from torchao.quantization.pt2e.quantize_pt2e import (
@@ -176,16 +178,17 @@ def calibrate_and_quantize(
176178
calibration_inputs: Iterable[tuple[torch.Tensor, ...]],
177179
quantizer: Quantizer,
178180
is_qat: bool = False,
181+
train_fn: Callable[[torch.fx.GraphModule], None] | None = None,
179182
) -> fx.GraphModule:
180183
"""Quantize the provided model.
181184
182185
:param model: Aten model (or it's GraphModule representation) to quantize.
183-
:param calibration_inputs: Either a tuple of calibration input tensors where each element corresponds to a model
184-
input. Or an iterator over such tuples.
186+
:param calibration_inputs: An iterator over tuples of calibration input tensors where each tensor corresponds to a
187+
model input.
185188
:param quantizer: Quantizer to use.
186189
:param is_qat: Whether quantization is done using Quantization Aware Training (QAT) or not.
187190
Note: In QAT mode, training is not performed. Only calibration (in eval mode) is done.
188-
191+
:param train_fn: Optional training function to be called during QAT.
189192
:return: Quantized GraphModule.
190193
"""
191194

@@ -195,12 +198,20 @@ def calibrate_and_quantize(
195198
if is_qat:
196199
m = prepare_qat_pt2e(model, quantizer)
197200
m = AddSimulatedLinearBatchNormFusionQATPass()(m).graph_module
201+
202+
if train_fn:
203+
m = move_exported_model_to_train(m)
204+
train_fn(m)
205+
198206
m = move_exported_model_to_eval(m)
207+
m = RemoveSimulatedLinearBatchNormFusionQATPass()(m).graph_module
208+
m = FuseBatchNormWithLinearPass()(m).graph_module
199209
else:
200210
m = prepare_pt2e(model, quantizer)
201211

202-
for data in calibration_inputs:
203-
m(*data)
212+
if not is_qat or (is_qat and not train_fn):
213+
for data in calibration_inputs:
214+
m(*data)
204215

205216
if is_qat:
206217
m = RemoveSimulatedLinearBatchNormFusionQATPass()(m).graph_module

backends/nxp/tests/__init__.py

Whitespace-only changes.
File renamed without changes.
Lines changed: 22 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,19 @@
1515
# TODO(Robert Kalmar) In accordance with the "TODO(dbort): Prune /test[s]/ dirs, /third-party/ dirs" in pyproject.toml,
1616
# once the test folders are not installed we can derive the path from current file location: `pathlib.Path(__file__)`
1717
PROJECT_DIR = os.environ.get("PROJECT_DIR")
18-
assert PROJECT_DIR and os.path.exists(PROJECT_DIR)
18+
if not PROJECT_DIR:
19+
# Auto-detect: The PROJECT_DIR env variable is set by the conftest.py in backends.nxp.tests.conftest. But unittests
20+
# don't use the conftest, so this variable is not set -> set it manually here in that case.
21+
PROJECT_DIR = str(pathlib.Path(__file__).parent.parent.parent.parent)
22+
assert os.path.exists(
23+
PROJECT_DIR
24+
), f"Invalid PROJECT_DIR env variable: `{PROJECT_DIR}`."
1925

2026
OUTPUTS_DIR = pathlib.Path(os.getcwd()) / ".outputs"
2127

2228
NSYS_PATH = pathlib.Path(shutil.which("nsys"))
2329
NSYS_CONFIG_PATH = os.path.join(
24-
PROJECT_DIR, "backends", "nxp", "tests_models", "neutron-imxrt700.ini"
30+
PROJECT_DIR, "backends", "nxp", "tests", "neutron-imxrt700.ini"
2531
)
2632
NSYS_FIRMWARE_PATH = os.path.join(
2733
os.path.dirname(eiq_neutron_sdk.__file__),
@@ -34,4 +40,17 @@
3440
# The NXP_RUNNER_PATH env variable is either defined by pytest when using the CLI argument --nxp_executor_path or
3541
# a standard environment variable.
3642
NEUTRON_TEST_PATH = os.environ.get("NXP_RUNNER_PATH")
37-
assert NEUTRON_TEST_PATH and os.path.exists(NEUTRON_TEST_PATH)
43+
if not NEUTRON_TEST_PATH:
44+
# Auto-detect: The NXP_RUNNER_PATH env variable is set by the conftest.py in backends.nxp.tests.conftest. But
45+
# unittests don't use the conftest, so this variable is not set -> set it manually here in that case.
46+
NEUTRON_TEST_PATH = (
47+
pathlib.Path(PROJECT_DIR)
48+
/ "examples"
49+
/ "nxp"
50+
/ "executor_runner"
51+
/ "build"
52+
/ "nxp_executor_runner"
53+
)
54+
assert os.path.exists(
55+
NEUTRON_TEST_PATH
56+
), f"Invalid NXP_RUNNER_PATH env variable: `{NEUTRON_TEST_PATH}`."

backends/nxp/tests_models/config_importer.py renamed to backends/nxp/tests/config_importer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,6 @@
1212

1313
logger.debug("Importing from executorch-integration")
1414
except ImportError:
15-
import executorch.backends.nxp.tests_models.config as test_config # noqa F401
15+
import executorch.backends.nxp.tests.config as test_config # noqa F401
1616

1717
logger.debug("Importing from executorch")
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
import pathlib
99
import shutil
1010

11-
from executorch.backends.nxp.tests_models.outputs_dir_importer import outputs_dir
11+
from executorch.backends.nxp.tests.outputs_dir_importer import outputs_dir
1212

1313

1414
def pytest_addoption(parser):

backends/nxp/tests_models/dataset_creator.py renamed to backends/nxp/tests/dataset_creator.py

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,11 @@
1414
import numpy as np
1515
import torch
1616
from executorch.backends.nxp.backend.ir.converter.conversion import translator
17-
from executorch.backends.nxp.tests_models.calibration_dataset import CalibrationDataset
18-
from executorch.backends.nxp.tests_models.model_input_spec import ModelInputSpec
17+
from executorch.backends.nxp.backend.ir.converter.conversion.translator import (
18+
torch_type_to_numpy_type,
19+
)
20+
from executorch.backends.nxp.tests.calibration_dataset import CalibrationDataset
21+
from executorch.backends.nxp.tests.executorch_pipeline import ModelInputSpec
1922
from torch import Tensor
2023

2124

@@ -100,10 +103,15 @@ def _gen_samples(
100103
case _:
101104
raise ValueError(f"Unsupported dim_order: {spec.dim_order}")
102105

103-
sample_vector = rng.random(np.prod(shape), spec.type).reshape(shape)
104-
sample_vector.tofile(
105-
os.path.join(sample_dir, f"{str(spec_idx).zfill(2)}.bin")
106+
sample_vector = rng.random(
107+
np.prod(shape), torch_type_to_numpy_type(spec.dtype)
108+
).reshape(shape)
109+
file_name = (
110+
f"{str(spec_idx).zfill(2)}.bin"
111+
if len(input_spec) > 1
112+
else f"{str(idx).zfill(4)}.bin"
106113
)
114+
sample_vector.tofile(os.path.join(sample_dir, file_name))
107115

108116

109117
class CopyDatasetCreator(DatasetCreator):
@@ -132,9 +140,9 @@ def generate_samples(self, dataset_dir, input_spec) -> tuple[str, str]:
132140

133141
if input_spec[0].dim_order == torch.channels_last:
134142
# Permute the sample to channels last and store it in the testing dataset.
135-
tensor = np.fromfile(sample_path, dtype=input_spec[0].type).reshape(
136-
input_spec[0].shape
137-
)
143+
tensor = np.fromfile(
144+
sample_path, dtype=torch_type_to_numpy_type(input_spec[0].dtype)
145+
).reshape(input_spec[0].shape)
138146

139147
if (
140148
list(tensor.shape) == list(input_spec[0].shape)

0 commit comments

Comments
 (0)