Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 4 additions & 7 deletions .github/workflows/pull.yml
Original file line number Diff line number Diff line change
Expand Up @@ -1235,26 +1235,23 @@ jobs:

# Build and install Executorch
PYTHON_EXECUTABLE=python \
CMAKE_ARGS="-DEXECUTORCH_BUILD_NXP_NEUTRON=ON -DEXECUTORCH_BUILD_NXP_NEUTRON_RUNNER=ON " \
CMAKE_ARGS="-DEXECUTORCH_BUILD_NXP_NEUTRON=ON -DEXECUTORCH_BUILD_NXP_NEUTRON_RUNNER=ON" \
.ci/scripts/setup-linux.sh --build-tool "cmake"

# Install test requirements
pip install -r backends/nxp/requirements-tests-pypi.txt
PYTHON_EXECUTABLE=python bash examples/nxp/setup.sh

# Run pytest
PYTHON_EXECUTABLE=python bash backends/nxp/run_unittests.sh

# Run aot examples:
PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh cifar10
PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh mobilenetv2

# Run e2e example with Simulator:
PYTHON_EXECUTABLE=python bash examples/nxp/run.sh cifar10

# Run lightweight model tests:
PYTHON_EXECUTABLE=python pytest -c /dev/null backends/nxp/tests_models/ \
--nxp_runner_path "./examples/nxp/executor_runner/build/nxp_executor_runner"
# Run unit tests:
export NXP_RUNNER_PATH="./examples/nxp/executor_runner/build/nxp_executor_runner"
PYTHON_EXECUTABLE=python bash backends/nxp/run_unittests.sh

test-samsung-quantmodels-linux:
name: test-samsung-quantmodels-linux
Expand Down
Empty file added __init__.py
Empty file.
Empty file added backends/nxp/__init__.py
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are those inits needed or were they added by IDE?

Empty file.
25 changes: 18 additions & 7 deletions backends/nxp/quantizer/utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright 2024-2025 NXP
# Copyright 2024-2026 NXP
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
Expand All @@ -10,7 +10,7 @@
import itertools
from collections import OrderedDict
from collections.abc import Iterable
from typing import Any, Dict, List, Tuple, Type
from typing import Any, Callable, Dict, List, Tuple, Type

import torch
from executorch.backends.nxp.aten_passes.fuse_batch_norm_with_linear_pass import (
Expand All @@ -30,8 +30,10 @@
check_subgraphs_connected,
SourcePartition,
)

from torchao.quantization.pt2e import (
move_exported_model_to_eval,
move_exported_model_to_train,
ObserverOrFakeQuantize,
)
from torchao.quantization.pt2e.quantize_pt2e import (
Expand Down Expand Up @@ -176,16 +178,17 @@ def calibrate_and_quantize(
calibration_inputs: Iterable[tuple[torch.Tensor, ...]],
quantizer: Quantizer,
is_qat: bool = False,
train_fn: Callable[[torch.fx.GraphModule], None] | None = None,
) -> fx.GraphModule:
"""Quantize the provided model.

:param model: Aten model (or its GraphModule representation) to quantize.
:param calibration_inputs: Either a tuple of calibration input tensors where each element corresponds to a model
input. Or an iterator over such tuples.
:param calibration_inputs: An iterator over tuples of calibration input tensors where each tensor corresponds to a
model input.
:param quantizer: Quantizer to use.
:param is_qat: Whether quantization is done using Quantization Aware Training (QAT) or not.
Note: In QAT mode, training is not performed. Only calibration (in eval mode) is done.

:param train_fn: Optional training function to be called during QAT.
:return: Quantized GraphModule.
"""

Expand All @@ -195,12 +198,20 @@ def calibrate_and_quantize(
if is_qat:
m = prepare_qat_pt2e(model, quantizer)
m = AddSimulatedLinearBatchNormFusionQATPass()(m).graph_module

if train_fn:
m = move_exported_model_to_train(m)
train_fn(m)

m = move_exported_model_to_eval(m)
m = RemoveSimulatedLinearBatchNormFusionQATPass()(m).graph_module
m = FuseBatchNormWithLinearPass()(m).graph_module
else:
m = prepare_pt2e(model, quantizer)

for data in calibration_inputs:
m(*data)
if not is_qat or (is_qat and not train_fn):
for data in calibration_inputs:
m(*data)

if is_qat:
m = RemoveSimulatedLinearBatchNormFusionQATPass()(m).graph_module
Expand Down
Empty file.
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,19 @@
# TODO(Robert Kalmar) In accordance with the "TODO(dbort): Prune /test[s]/ dirs, /third-party/ dirs" in pyproject.toml,
# once the test folders are not installed we can derive the path from current file location: `pathlib.Path(__file__)`
PROJECT_DIR = os.environ.get("PROJECT_DIR")
assert PROJECT_DIR and os.path.exists(PROJECT_DIR)
if not PROJECT_DIR:
# Auto-detect: The PROJECT_DIR env variable is set by the conftest.py in backends.nxp.tests.conftest. But unittests
# don't use the conftest, so this variable is not set -> set it manually here in that case.
PROJECT_DIR = str(pathlib.Path(__file__).parent.parent.parent.parent)
assert PROJECT_DIR and os.path.exists(
PROJECT_DIR
), f"Invalid PROJECT_DIR env variable: `{PROJECT_DIR}`."

OUTPUTS_DIR = pathlib.Path(os.getcwd()) / ".outputs"

NSYS_PATH = pathlib.Path(shutil.which("nsys"))
NSYS_CONFIG_PATH = os.path.join(
PROJECT_DIR, "backends", "nxp", "tests_models", "neutron-imxrt700.ini"
PROJECT_DIR, "backends", "nxp", "tests", "neutron-imxrt700.ini"
)
NSYS_FIRMWARE_PATH = os.path.join(
os.path.dirname(eiq_neutron_sdk.__file__),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,6 @@

logger.debug("Importing from executorch-integration")
except ImportError:
import executorch.backends.nxp.tests_models.config as test_config # noqa F401
import executorch.backends.nxp.tests.config as test_config # noqa F401

logger.debug("Importing from executorch")
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import pathlib
import shutil

from executorch.backends.nxp.tests_models.outputs_dir_importer import outputs_dir
from executorch.backends.nxp.tests.outputs_dir_importer import outputs_dir


def pytest_addoption(parser):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@
import numpy as np
import torch
from executorch.backends.nxp.backend.ir.converter.conversion import translator
from executorch.backends.nxp.tests_models.calibration_dataset import CalibrationDataset
from executorch.backends.nxp.tests_models.model_input_spec import ModelInputSpec
from executorch.backends.nxp.tests.calibration_dataset import CalibrationDataset
from executorch.backends.nxp.tests.executorch_pipeline import ModelInputSpec
from torch import Tensor


Expand Down
Loading
Loading