# Copyright 2026 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Set, Type

import torch
from executorch.backends.arm._passes import ArmPass
from executorch.backends.arm.tosa.specification import get_context_spec
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import ExportPass, NodeMetadata


class InsertDataLayoutCastsPass(ArmPass):
    """Insert casts around data layout operators when their dtype is not
    supported by the active TOSA specification.

    This pass targets operators that lower to TOSA data layout operators:
    CONCAT, PAD, RESHAPE, REVERSE, SLICE, TILE, and TRANSPOSE.

    Example:
        Before pass:
            y = transpose(x)  # x.data.dtype == torch.int32
        After pass:
            xfp32 = _to_dim_order_copy(x, dtype=torch.float32)
            yfp32 = transpose(xfp32)
            y = _to_dim_order_copy(yfp32, dtype=torch.int32)
    """

    _passes_required_after: Set[Type[ExportPass]] = set()

    # Cast op used to convert tensors to and from the supported dtype.
    _cast_op = exir_ops.edge.dim_order_ops._to_dim_order_copy.default

    _concat_ops = {
        exir_ops.edge.aten.cat.default,
        exir_ops.edge.aten.concatenate.default,
    }
    _single_input_ops = {
        exir_ops.backend.tosa.TRANSPOSE.default,
        exir_ops.edge.aten.constant_pad_nd.default,
        exir_ops.edge.aten.view_copy.default,
        exir_ops.edge.aten.repeat.default,
        exir_ops.edge.aten.permute_copy.default,
        exir_ops.edge.aten.slice_copy.Tensor,
        exir_ops.edge.aten.flip.default,
    }
    targeted_ops = _concat_ops | _single_input_ops

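    # When the active spec lacks float or integer support, the maps below
    # choose a stand-in dtype for the unsupported one, keeping the same bit
    # width where possible (int8 is the exception; see below).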
    _fp_to_int_map = {
        torch.float16: torch.int16,
        torch.bfloat16: torch.int16,
        torch.float32: torch.int32,
    }

    _int_to_fp_map = {
        # Casting int8 to float16 doubles the tensor size, but this case is
        # very unlikely in practice: in PRO-FP, int8 is only ever used by
        # LOGICAL_SHIFT and CAST/RESCALE ops.
        torch.int8: torch.float16,
        torch.int16: torch.float16,
        torch.int32: torch.float32,
    }

    def call_operator(self, op, args, kwargs, meta):
        if op not in self.targeted_ops:
            return super().call_operator(op, args, kwargs, meta)

        if op in self._concat_ops:
            # Cast all inputs to the widest input dtype; ties resolve to the
            # first input with that size.
            dtypes = [arg.data.dtype for arg in args[0]]
            dtype_sizes = [dtype.itemsize for dtype in dtypes]
            dtype = dtypes[dtype_sizes.index(max(dtype_sizes))]
        else:
            dtype = args[0].data.dtype

        spec = get_context_spec()
        # bool is treated as neither integer nor float, so boolean tensors
        # never trigger a cast.
        dtype_is_integer = not dtype.is_floating_point and dtype != torch.bool
        if dtype_is_integer and not spec.support_integer():
            supported_dtype = self._int_to_fp_map.get(dtype, None)
        elif dtype.is_floating_point and not spec.support_float():
            supported_dtype = self._fp_to_int_map.get(dtype, None)
        else:
            return super().call_operator(op, args, kwargs, meta)

        # Unlike the other targeted ops, CONCAT only supports int16 when the
        # int16 extension is enabled.
        if (
            op in self._concat_ops
            and supported_dtype == torch.int16
            and not spec.support_extension("int16")
        ):
            supported_dtype = None

        if supported_dtype is None:
            raise TypeError(
                f"Data type {dtype} of operator {op} is not supported by"
                f" {spec}, and casting is currently not supported by"
                f" {self.__class__.__name__}."
            )

        if op in self._concat_ops:
            # Cast each input tensor to the supported dtype.
            x_casted = []
            for arg in args[0]:
                x_casted.append(
                    super().call_operator(
                        self._cast_op,
                        (arg,),
                        {"dtype": supported_dtype},
                        NodeMetadata(arg.node.meta),
                        updated=True,
                    )
                )
        else:
            x_casted = super().call_operator(
                self._cast_op,
                (args[0],),
                {"dtype": supported_dtype},
                NodeMetadata(args[0].node.meta),
                updated=True,
            )

        # Run the targeted op on the casted input(s), then cast the result
        # back to the original dtype.
        y_casted = super().call_operator(
            op, (x_casted, *args[1:]), kwargs, meta, updated=True
        )
        y = super().call_operator(
            self._cast_op, (y_casted,), {"dtype": dtype}, meta, updated=True
        )
        return y
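
# Minimal usage sketch (illustrative, not part of the original module):
# ArmPass derives from ExportPass, so instances of this pass can be called on
# a torch.fx GraphModule and return a PassResult. This sketch assumes the
# pass needs no constructor arguments and that a TOSA specification context
# is active so that get_context_spec() resolves; `graph_module` stands in for
# whatever exported graph the caller holds.
#
#     result = InsertDataLayoutCastsPass()(graph_module)
#     graph_module = result.graph_module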