|
| 1 | +# Copyright (c) Qualcomm Innovation Center, Inc. |
| 2 | +# All rights reserved |
| 3 | +# |
| 4 | +# This source code is licensed under the BSD-style license found in the |
| 5 | +# LICENSE file in the root directory of this source tree. |
| 6 | + |
| 7 | +import torch |
| 8 | +from executorch.exir.dialects._ops import ops as exir_ops |
| 9 | +from executorch.exir.dialects.edge._ops import EdgeOpOverload |
| 10 | +from executorch.exir.pass_base import ExportPass, PassResult |
| 11 | +from torchao.quantization.pt2e.utils import get_new_attr_name_with_prefix |
| 12 | + |
| 13 | +from .utils import copy_meta, get_const_node |
| 14 | + |
| 15 | + |
class DecomposeRemainder(ExportPass):
    """
    Decompose remainder.Scalar and remainder.Tensor using the identity:

        remainder(x, y) = x - floor(x / y) * y

    Both the aten and edge dialects are handled. For edge-dialect ops with a
    scalar divisor, the scalar is first materialized as a constant node so the
    Tensor-variant arithmetic ops can consume it.

    NOTE(review): every intermediate node (div/floor/mul) receives a copy of the
    original remainder node's meta; presumably downstream passes only rely on the
    final sub node's meta being exact — confirm if shape/dtype of intermediates
    ever diverges from the output.
    """

    def __init__(self):
        super().__init__()
        # Targets to rewrite, covering both dialects and both overloads.
        self.remainder_targets = {
            torch.ops.aten.remainder.Scalar,
            torch.ops.aten.remainder.Tensor,
            exir_ops.edge.aten.remainder.Scalar,
            exir_ops.edge.aten.remainder.Tensor,
        }

    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
        """Rewrite every matching remainder op into div/floor/mul/sub nodes.

        Args:
            graph_module: The FX graph module to transform in place.

        Returns:
            A ``PassResult`` whose ``modified`` flag accurately reflects whether
            any rewrite occurred. Dead-code elimination and recompilation are
            performed only when the graph was actually changed.
        """
        graph = graph_module.graph
        # Cache scalar -> const-node mappings so a scalar divisor shared by
        # multiple remainder ops registers only one buffer.
        const_cache = {}
        modified = False

        # Iterate over a snapshot since we insert nodes while walking.
        for node in list(graph.nodes):
            if node.op != "call_function" or node.target not in self.remainder_targets:
                continue

            x_node = node.args[0]
            y_arg = node.args[1]
            is_edge = isinstance(node.target, EdgeOpOverload)
            meta = node.meta

            # Select the dialect-matching ops for the decomposition.
            if is_edge:
                div_op = exir_ops.edge.aten.div.Tensor
                floor_op = exir_ops.edge.aten.floor.default
                mul_op = exir_ops.edge.aten.mul.Tensor
                sub_op = exir_ops.edge.aten.sub.Tensor
            else:
                div_op = torch.ops.aten.div.Tensor
                floor_op = torch.ops.aten.floor.default
                mul_op = torch.ops.aten.mul.Tensor
                sub_op = torch.ops.aten.sub.Tensor

            # Edge-dialect Tensor ops need tensor operands, so lift a scalar
            # divisor into a registered constant node (cached per value).
            is_scalar = not isinstance(y_arg, torch.fx.Node)
            if is_scalar and is_edge:
                if y_arg not in const_cache:
                    attr_name = get_new_attr_name_with_prefix("_remainder_const_")(
                        graph_module
                    )
                    const_cache[y_arg] = get_const_node(
                        graph, graph_module, attr_name, y_arg, node
                    )
                y_node = const_cache[y_arg]
            else:
                y_node = y_arg

            with graph.inserting_before(node):
                # x / y
                div_node = graph.create_node(
                    "call_function", div_op, (x_node, y_node)
                )
                div_node.meta = copy_meta(meta)

                # floor(x / y)
                floor_node = graph.create_node(
                    "call_function", floor_op, (div_node,)
                )
                floor_node.meta = copy_meta(meta)

                # floor(x / y) * y
                mul_node = graph.create_node(
                    "call_function", mul_op, (floor_node, y_node)
                )
                mul_node.meta = copy_meta(meta)

                # x - floor(x / y) * y
                sub_node = graph.create_node(
                    "call_function", sub_op, (x_node, mul_node)
                )
                sub_node.meta = copy_meta(meta)

            # Reroute all consumers of the remainder node to the decomposed
            # result; the original node becomes dead and is cleaned up below.
            for user in node.users.copy():
                user.replace_input_with(node, sub_node)
            modified = True

        # Only clean up and recompile if we actually rewrote something, and
        # report the modified flag truthfully (the original always returned
        # True, even for graphs with no remainder ops).
        if modified:
            graph.eliminate_dead_code()
            graph_module.recompile()
        return PassResult(graph_module, modified)
0 commit comments