Skip to content

Commit 763515d

Browse files
authored
Merge branch 'main' into feature/standalone-runner
2 parents 3eea609 + 803e47d commit 763515d

32 files changed

Lines changed: 3755 additions & 33 deletions

backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 36 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,36 @@
3333
logger.setLevel(get_coreml_log_level(default_level=logging.INFO))
3434

3535

36+
# Ops that the CoreML partitioner must always reject regardless of their
37+
# arguments. Each entry is annotated with the upstream issue that motivates
38+
# it so future readers can tell when an entry is safe to drop.
39+
_UNSUPPORTED_OP_TARGETS = frozenset(
40+
[
41+
# https://github.com/apple/coremltools/issues/2565 — diagonal has a
42+
# CoreML correctness bug.
43+
torch.ops.aten.diagonal.default,
44+
torch.ops.aten.diagonal_copy.default,
45+
exir_ops.edge.aten.diagonal.default,
46+
exir_ops.edge.aten.diagonal_copy.default,
47+
# https://github.com/apple/coremltools/issues/2569 — acosh / asinh
48+
# are not implemented in coremltools.
49+
torch.ops.aten.acosh.default,
50+
exir_ops.edge.aten.acosh.default,
51+
torch.ops.aten.asinh.default,
52+
exir_ops.edge.aten.asinh.default,
53+
# https://github.com/pytorch/executorch/issues/11722 — only
54+
# ``aten.rand.default`` actually reaches an unimplemented branch in
55+
# coremltools 9.0 ("not enough values to unpack (expected 5, got 1)"
56+
# raised from the rand handler). ``randn`` / ``rand_like`` /
57+
# ``randn_like`` / ``randint`` lower cleanly today, so we leave them
58+
# delegated. Verified locally against coremltools 9.0 / Python 3.10
59+
# by lowering each op in isolation.
60+
torch.ops.aten.rand.default,
61+
exir_ops.edge.aten.rand.default,
62+
]
63+
)
64+
65+
3666
def _is_view_op(op: torch._ops.OpOverload) -> bool:
3767
schema = op._schema
3868
if len(schema.arguments) == 0:
@@ -92,27 +122,13 @@ def should_override_support(self, node) -> bool:
92122
)
93123
return True
94124

95-
# https://github.com/apple/coremltools/issues/2565
96-
if node.target in [
97-
torch.ops.aten.diagonal.default,
98-
torch.ops.aten.diagonal_copy.default,
99-
exir_ops.edge.aten.diagonal.default,
100-
exir_ops.edge.aten.diagonal_copy.default,
101-
]:
102-
self.log_once(
103-
"torch.ops.aten.diagonal.default has a bug in CoreML. Overriding op support."
104-
)
105-
return True
106-
107-
# https://github.com/apple/coremltools/issues/2569
108-
if node.target in [
109-
torch.ops.aten.acosh.default,
110-
exir_ops.edge.aten.acosh.default,
111-
torch.ops.aten.asinh.default,
112-
exir_ops.edge.aten.asinh.default,
113-
]:
125+
# Ops that are unsupported by CoreML purely on the basis of their
126+
# target — no per-arg conditions to check. Grouped by upstream issue
127+
# so the comment trail still points at the underlying coremltools /
128+
# executorch bug for each entry.
129+
if node.target in _UNSUPPORTED_OP_TARGETS:
114130
self.log_once(
115-
"torch.ops.aten.{acosh, asinh}.default is not supported by CoreML. Overriding op support."
131+
f"{node.target} is not supported by CoreML. Overriding op support."
116132
)
117133
return True
118134

backends/apple/coreml/test/test_coreml_partitioner.py

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -338,6 +338,54 @@ def forward(self, x):
338338
torch.allclose(et_outputs, eager_outputs, atol=1e-02, rtol=1e-02)
339339
)
340340

341+
def test_aten_rand_default_falls_back_to_portable(self):
342+
"""
343+
Regression test for https://github.com/pytorch/executorch/issues/11722.
344+
345+
coremltools 9.0's ``aten.rand.default`` handler hits an unimplemented
346+
branch (``not enough values to unpack (expected 5, got 1)``). The
347+
partitioner must reject it so the op falls back to the portable
348+
backend instead of crashing the export. Sibling ops like
349+
``aten.randn``, ``aten.rand_like``, etc. lower cleanly and are
350+
intentionally still delegated.
351+
"""
352+
353+
class Model(torch.nn.Module):
354+
def forward(self, x):
355+
return torch.rand(x.shape) + x
356+
357+
model = Model().eval()
358+
example_inputs = (torch.zeros(5, 5),)
359+
exir_program_aten = torch.export.export(model, example_inputs, strict=True)
360+
edge_program_manager = executorch.exir.to_edge_transform_and_lower(
361+
exir_program_aten, partitioner=[CoreMLPartitioner()]
362+
)
363+
op_names = [
364+
node.target.__name__
365+
for node in edge_program_manager.exported_program().graph.nodes
366+
if node.op == "call_function"
367+
]
368+
self.assertIn("aten.rand.default", op_names)
369+
370+
def test_aten_randn_is_still_delegated(self):
371+
"""``aten.randn`` is *not* in the deny list — it lowers cleanly."""
372+
373+
class Model(torch.nn.Module):
374+
def forward(self, x):
375+
return torch.randn(x.shape) + x
376+
377+
ep = torch.export.export(Model().eval(), (torch.zeros(5, 5),), strict=True)
378+
edge = executorch.exir.to_edge_transform_and_lower(
379+
ep, partitioner=[CoreMLPartitioner()]
380+
)
381+
op_names = [
382+
n.target.__name__
383+
for n in edge.exported_program().graph.nodes
384+
if n.op == "call_function"
385+
]
386+
self.assertIn("executorch_call_delegate", op_names)
387+
self.assertNotIn("aten.randn.default", op_names)
388+
341389
def test_deprecation_warning_for_to_backend_workflow(self):
342390
"""
343391
Test that the deprecated to_edge + to_backend workflow shows a deprecation warning.
@@ -435,5 +483,7 @@ def forward(self, x):
435483
test_runner.test_lower_full_graph()
436484
# test_runner.test_symint_arg()
437485
test_runner.test_take_over_constant_data_false()
486+
test_runner.test_aten_rand_default_falls_back_to_portable()
487+
test_runner.test_aten_randn_is_still_delegated()
438488
test_runner.test_deprecation_warning_for_to_backend_workflow()
439489
test_runner.test_no_warning_for_to_edge_transform_and_lower_workflow()
Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
2+
3+
oncall("executorch")
4+
5+
runtime.python_library(
6+
name = "spec",
7+
srcs = ["_spec.py"],
8+
)
9+
10+
runtime.python_library(
11+
name = "custom_ops_lib",
12+
srcs = ["custom_ops_lib.py"],
13+
deps = [
14+
"//caffe2:torch",
15+
],
16+
)
17+
18+
runtime.python_library(
19+
name = "selectors",
20+
srcs = ["_selectors.py"],
21+
deps = [
22+
"//caffe2:torch",
23+
],
24+
)
25+
26+
runtime.python_library(
27+
name = "reducers",
28+
srcs = ["_reducers.py"],
29+
deps = [
30+
"//caffe2:torch",
31+
"//executorch/exir/dialects:lib",
32+
],
33+
)
34+
35+
runtime.python_library(
36+
name = "tap_pass",
37+
srcs = ["_tap_pass.py"],
38+
deps = [
39+
"//caffe2:torch",
40+
":custom_ops_lib",
41+
":reducers",
42+
":selectors",
43+
":spec",
44+
],
45+
)
46+
47+
runtime.python_library(
48+
name = "strip_pass",
49+
srcs = ["_strip_pass.py"],
50+
deps = [
51+
"//caffe2:torch",
52+
":reducers",
53+
":tap_pass",
54+
],
55+
)
56+
57+
runtime.python_library(
58+
name = "convenience",
59+
srcs = ["_convenience.py"],
60+
deps = [
61+
"fbsource//third-party/pypi/pandas:pandas",
62+
"//caffe2:torch",
63+
"//executorch/exir:lib",
64+
"//executorch/runtime:runtime",
65+
":reducers",
66+
":selectors",
67+
":spec",
68+
":strip_pass",
69+
":tap_pass",
70+
],
71+
)
72+
73+
runtime.python_library(
74+
name = "lib",
75+
srcs = ["__init__.py"],
76+
deps = [
77+
":convenience",
78+
":custom_ops_lib",
79+
":reducers",
80+
":selectors",
81+
":spec",
82+
":strip_pass",
83+
":tap_pass",
84+
],
85+
)
Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
"""
10+
Public API for the ExecuTorch numerical debugger.
11+
12+
Backend-agnostic intermediate-value tap:
13+
14+
- Runtime side: USER_OUTPUT taps (this module — works through delegates without
15+
any backend-side changes)
16+
17+
Typical usage:
18+
19+
from executorch.devtools.intermediate_output_tap import (
20+
compare_aot_runtime_dataframe,
21+
tap_intermediate_outputs_, strip_taps_, STATS,
22+
)
23+
24+
ep = export(model, example_inputs)
25+
ep_tapped, specs = tap_intermediate_outputs_(ep, reducer=STATS)
26+
aot_flat, _ = pytree.tree_flatten(ep_tapped.module()(*example_inputs))
27+
edge = to_edge_transform_and_lower(ep_tapped, partitioner=[XnnpackPartitioner()])
28+
strip_taps_(edge)
29+
et_program = edge.to_executorch()
30+
31+
rt_flat = runtime.forward(*example_inputs)
32+
df = compare_aot_runtime_dataframe(specs, aot_flat, rt_flat)
33+
"""
34+
35+
# Importing this module registers torch.ops.executorch_devtools.tap.Tensor.
36+
from executorch.devtools.intermediate_output_tap import custom_ops_lib # noqa: F401
37+
from executorch.devtools.intermediate_output_tap._convenience import (
38+
compare_aot_runtime_dataframe,
39+
tap_compare,
40+
)
41+
from executorch.devtools.intermediate_output_tap._reducers import (
42+
FULL_TENSOR,
43+
get_reducer,
44+
StatReducer,
45+
STATS,
46+
)
47+
from executorch.devtools.intermediate_output_tap._selectors import (
48+
NodeSelector,
49+
select_all,
50+
select_all_call_function,
51+
select_any,
52+
select_by_module_class,
53+
select_by_module_path,
54+
select_by_op_type,
55+
select_not,
56+
)
57+
from executorch.devtools.intermediate_output_tap._spec import TapSpec
58+
from executorch.devtools.intermediate_output_tap._strip_pass import strip_taps_
59+
from executorch.devtools.intermediate_output_tap._tap_pass import (
60+
find_tap_nodes,
61+
is_tap_node,
62+
tap_intermediate_outputs_,
63+
TapRule,
64+
)
65+
66+
67+
__all__ = [
68+
# Core API
69+
"tap_intermediate_outputs_",
70+
"strip_taps_",
71+
"TapSpec",
72+
"TapRule",
73+
# Convenience
74+
"tap_compare",
75+
"compare_aot_runtime_dataframe",
76+
# Reducers
77+
"StatReducer",
78+
"FULL_TENSOR",
79+
"STATS",
80+
"get_reducer",
81+
# Selectors
82+
"NodeSelector",
83+
"select_all_call_function",
84+
"select_by_op_type",
85+
"select_by_module_path",
86+
"select_by_module_class",
87+
"select_any",
88+
"select_all",
89+
"select_not",
90+
# Helpers
91+
"find_tap_nodes",
92+
"is_tap_node",
93+
]

0 commit comments

Comments
 (0)