Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions .lintrunner.toml
Original file line number Diff line number Diff line change
Expand Up @@ -376,7 +376,6 @@ exclude_patterns = [
'scripts/check_binary_dependencies.py',
'profiler/test/test_profiler_e2e.py',
'backends/arm/test/ops/*.py',
'backends/cortex_m/test/**/*.py',
]
command = [
'python',
Expand Down Expand Up @@ -410,7 +409,6 @@ include_patterns = [
exclude_patterns = [
'third-party/**',
'**/third-party/**',
'backends/cortex_m/test/**/*.py',
]
command = [
'python',
Expand Down
4 changes: 2 additions & 2 deletions backends/cortex_m/test/misc/test_portable_int8.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from executorch.backends.arm._passes import FoldAndAnnotateQParamsPass
from executorch.backends.arm._passes.arm_pass_utils import get_first_fake_tensor
from executorch.backends.arm.quantizer.arm_quantizer_utils import SharedQspecQuantizer
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.quantizer.quantizer import CortexMQuantizer
from executorch.backends.cortex_m.test.tester import CortexMTester
from executorch.backends.test.harness.stages import StageType
Expand Down Expand Up @@ -660,7 +660,7 @@ def _quantize_and_export(
),
}

xfails = {
xfails: dict[str, xfail_type] = {
"contiguous": "MLETORCH-1863: Contiguous no-op is removed in to-edge, leading to unnecessary Q-DQ-Q-DQ chain.",
"clamp": "MLETORCH-1864: Support non-fused clamp-type activations.",
"clamp_tensor": "MLETORCH-1864: Support non-fused clamp-type activations.",
Expand Down
28 changes: 14 additions & 14 deletions backends/cortex_m/test/misc/test_quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

import torch
from executorch.backends.arm._passes.arm_pass_utils import get_first_fake_tensor
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand Down Expand Up @@ -141,8 +141,8 @@ def forward(self, x, y):
class SharedQspecInputForkXConstant(torch.nn.Module):
"""Shared qspec cluster with an input fork with left input as global constant."""

ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}
constant = torch.tensor(5.0)

def forward(self, x):
Expand All @@ -152,8 +152,8 @@ def forward(self, x):
class SharedQspecInputForkYConstant(torch.nn.Module):
"""Shared qspec cluster with an input fork with left input as local constant."""

ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def forward(self, x):
return torch.maximum(x, torch.tensor(5.0))
Expand Down Expand Up @@ -259,8 +259,8 @@ def forward(self, x):


class SharedQspecSurroundedQuantizedOpConstant(torch.nn.Module):
ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def forward(self, x):
x1 = torch.clone(x)
Expand All @@ -270,16 +270,16 @@ def forward(self, x):


class SharedQspecSub(torch.nn.Module):
ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def forward(self, x, y):
return torch.clone(x - y)


class SharedQspecCompetingQspecs(torch.nn.Module):
ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def __init__(self):
super().__init__()
Expand All @@ -299,8 +299,8 @@ def forward(self, x):


class SharedQspecNoQspecs(torch.nn.Module):
ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def forward(self, x):
z = torch.clone(x - x)
Expand Down Expand Up @@ -358,7 +358,7 @@ def forward(self, x):
),
}

xfails = {
xfails: dict[str, xfail_type] = {
"surrounded_quantized_op_constant": "Numerical error since the add is forced to have non-correct qparams.",
}

Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/models/test_mobilenet_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

from executorch.backends.cortex_m.test.tester import CortexMTester, McuTestCase
from executorch.backends.test.harness.stages import StageType
from torchvision import models
from torchvision import models # type: ignore[import-untyped]


ops_before_transforms: dict[str, int] = {
Expand Down Expand Up @@ -56,7 +56,7 @@

@parametrize("test_case", test_cases)
def test_dialect_mv2(test_case):
inputs = test_case.example_inputs()
inputs = test_case.get_example_inputs()
tester = CortexMTester(test_case.model, inputs)
tester.test_dialect(
ops_before_transforms,
Expand All @@ -78,7 +78,7 @@ def test_dialect_mv2(test_case):
strict=False,
)
def test_implementation_mv2(test_case):
inputs = test_case.example_inputs()
inputs = test_case.get_example_inputs()
tester = CortexMTester(test_case.model, inputs)
tester.test_implementation(
qtol=10,
Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/models/test_mobilenet_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

from executorch.backends.cortex_m.test.tester import CortexMTester, McuTestCase
from executorch.backends.test.harness.stages import StageType
from torchvision import models
from torchvision import models # type: ignore[import-untyped]


ops_before_transforms: dict[str, int] = {
Expand Down Expand Up @@ -65,7 +65,7 @@
strict=False,
)
def test_dialect_mv3(test_case):
inputs = test_case.example_inputs()
inputs = test_case.get_example_inputs()
tester = CortexMTester(test_case.model, inputs)
tester.test_dialect(
ops_before_transforms,
Expand All @@ -89,7 +89,7 @@ def test_dialect_mv3(test_case):
strict=False,
)
def test_implementation_mv3(test_case):
inputs = test_case.example_inputs()
inputs = test_case.get_example_inputs()
tester = CortexMTester(test_case.model, inputs)
tester.test_implementation(
qtol=20,
Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/ops/test_add.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@


import torch
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand Down Expand Up @@ -152,10 +152,10 @@ class CortexMAlphaAdd(ModelAlpha):
}


xfails_implementation = {
xfails_implementation: dict[str, xfail_type] = {
"alpha": "Expecting kwargs for aten op IR to be empty - alpha arg not supported.",
}
xfails_dialect = xfails_implementation | {
xfails_dialect: dict[str, xfail_type] = xfails_implementation | {
# Cortex-M quantizer will not quantize additions that require broadcasting
# leading to the add op not being replaced by a cortex-m specific implementation
"broadcast_1": "Broadcasting is not supported in Cortex-M backend",
Expand Down
17 changes: 8 additions & 9 deletions backends/cortex_m/test/ops/test_conv.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Copyright 2025 Arm Limited and/or its affiliates.
# Copyright 2025-2026 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import torch
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand All @@ -14,8 +14,8 @@


class CortexMConv1D(torch.nn.Module):
ops_before_transforms = {}
ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def __init__(self, *args, **kwargs):
super().__init__()
Expand Down Expand Up @@ -72,9 +72,8 @@ def forward(self, x):


class CortexMConv3D(torch.nn.Module):
ops_before_transforms = {}

ops_after_transforms = {}
ops_before_transforms: dict[str, int] = {}
ops_after_transforms: dict[str, int] = {}

def __init__(self, *args, **kwargs):
super().__init__()
Expand Down Expand Up @@ -313,7 +312,7 @@ def forward(self, x):
}


xfails_dialect = {
xfails_dialect: dict[str, xfail_type] = {
"conv2d_dilation": "NotImplementedError: 'slow_conv_dilated<>' not implemented for 'Int'",
"conv1d": "Currently not supported.",
"conv2d_nchw": "Currently not supported.",
Expand All @@ -330,7 +329,7 @@ def test_dialect_conv2d(test_case):
)


xfails_implementation = {
xfails_implementation: dict[str, xfail_type] = {
"conv1d": "Currently not supported.",
"conv3d": "Currently not supported.",
}
Expand Down
7 changes: 4 additions & 3 deletions backends/cortex_m/test/ops/test_conv_transpose.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# Copyright 2025-2026 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import torch
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand Down Expand Up @@ -220,7 +221,7 @@ def forward(self, x):
),
}

xfails_dialect = {
xfails_dialect: dict[str, xfail_type] = {
# Grouped convolutions not supported by CMSIS-NN - rejected during quantization
"conv_transpose2d_groups_2": "Grouped transpose conv not supported by CMSIS-NN",
"conv_transpose2d_depthwise": "Depthwise transpose conv not supported by CMSIS-NN",
Expand Down Expand Up @@ -248,7 +249,7 @@ def test_dialect_conv_transpose2d(test_case):
# Implementation xfails: empty because unsupported configurations are now
# rejected at AOT time by the quantizer filter, so they fall back to portable
# ops and work correctly. Only xfails_dialect needs to track these.
xfails_implementation = {}
xfails_implementation: dict[str, xfail_type] = {}


@parametrize("test_case", test_cases, xfails=xfails_implementation)
Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/ops/test_lstm.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright 2025 Arm Limited and/or its affiliates.
# Copyright 2025-2026 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
Expand Down Expand Up @@ -30,7 +30,7 @@ class CortexMLSTM(torch.nn.Module):
"executorch_exir_dialects_edge__ops_aten_cat_default": 1,
}

ops_after_transforms = {}
ops_after_transforms: dict[str, int] = {}

def __init__(self, input_size: int = 4, hidden_size: int = 3) -> None:
super().__init__()
Expand Down Expand Up @@ -59,7 +59,7 @@ class CortexMQuantizableLSTM(torch.nn.Module):
"executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 27,
}

ops_after_transforms = {}
ops_after_transforms: dict[str, int] = {}

def __init__(self, input_size: int = 4, hidden_size: int = 3) -> None:
super().__init__()
Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/ops/test_max_pool2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@


import torch
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand Down Expand Up @@ -82,10 +82,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
),
}

xfails_max_pool2d = {
xfails_max_pool2d: dict[str, xfail_type] = {
"maxpool_2x2_indices": (
"Indices output not supported; quantizer does not handle getitem on max_pool2d_with_indices.",
(NotImplementedError, AssertionError, RuntimeError, Exception),
Exception,
),
}

Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/ops/test_minimum.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Copyright 2025 Arm Limited and/or its affiliates.
# Copyright 2025-2026 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import torch
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand Down Expand Up @@ -87,7 +87,7 @@ def forward(self, x, y):
}


xfails = {}
xfails: dict[str, xfail_type] = {}


@parametrize("test_case", test_cases, xfails=xfails)
Expand Down
6 changes: 3 additions & 3 deletions backends/cortex_m/test/ops/test_mul.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Copyright 2025 Arm Limited and/or its affiliates.
# Copyright 2025-2026 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import torch
from executorch.backends.arm.test.common import parametrize
from executorch.backends.arm.test.common import parametrize, xfail_type
from executorch.backends.cortex_m.test.tester import (
CortexMTester,
McuTestCase,
Expand Down Expand Up @@ -127,7 +127,7 @@ class CortexMTensorMul(Model):
}


xfail_cases_dialect = {
xfail_cases_dialect: dict[str, xfail_type] = {
# Cortex-M quantizer will not quantize multiplications that require broadcasting
# leading to the mul op not being replaced by a cortex-m specific implementation
"broadcast_1": "Broadcasting is not supported in Cortex-M backend",
Expand Down
Loading
Loading