
Commit 9076110

Migrate devtools tests and clean up remaining executorch/ CaptureConfig refs (#18135)
Differential Revision: D95605485
Pull Request resolved: #18135
1 parent baa9888 commit 9076110

6 files changed

Lines changed: 20 additions & 53 deletions
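Every hunk below follows the same pattern: the deprecated exir.capture(...) / exir.CaptureConfig entry point gives way to torch.export.export plus executorch.exir.to_edge. A minimal before/after sketch of that pattern (SampleModule and inputs are illustrative, not taken from this diff):

import torch

from executorch import exir
from executorch.exir import to_edge
from torch.export import export


class SampleModule(torch.nn.Module):  # illustrative stand-in for the test modules
    def forward(self, x):
        return torch.sin(x)


inputs = (torch.randn(4),)

# Before (the path removed by this commit):
#   captured = exir.capture(SampleModule(), inputs, exir.CaptureConfig())
#   edge = captured.to_edge(exir.EdgeCompileConfig(_check_ir_validity=False))

# After: torch.export yields an ExportedProgram directly,
exported_program = export(SampleModule(), inputs, strict=True)

# and the free function to_edge() wraps it in an EdgeProgramManager.
edge_manager = to_edge(
    exported_program,
    compile_config=exir.EdgeCompileConfig(_check_ir_validity=False),
)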


backends/qualcomm/utils/utils.py

Lines changed: 0 additions & 4 deletions
@@ -134,10 +134,6 @@ def is_node_supported(self, _, node: torch.fx.Node) -> bool:
         return True
 
 
-def qnn_capture_config():
-    return exir.CaptureConfig(enable_aot=True)
-
-
 def qnn_edge_config() -> exir.EdgeCompileConfig:
     return exir.EdgeCompileConfig(
         _check_ir_validity=False,
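With qnn_capture_config() gone, qnn_edge_config() is the only helper left in this file; a caller that previously combined the two would presumably capture with torch.export instead. A sketch under that assumption (model and inputs are placeholders):

import torch

from executorch.backends.qualcomm.utils.utils import qnn_edge_config
from executorch.exir import to_edge
from torch.export import export

model = torch.nn.Linear(4, 4)  # placeholder module
inputs = (torch.randn(1, 4),)

# Capture via torch.export rather than exir.capture(..., qnn_capture_config());
# the edge-compile settings still come from qnn_edge_config().
edge_manager = to_edge(
    export(model, inputs, strict=True),
    compile_config=qnn_edge_config(),
)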

devtools/etrecord/tests/etrecord_test.py

Lines changed: 10 additions & 14 deletions
@@ -618,12 +618,10 @@ def test_add_extra_export_modules(self):
 
         # Create additional module to add
         f2 = models.BasicSinMax()
-        captured_output2 = exir.capture(
-            f2, f2.get_random_inputs(), exir.CaptureConfig()
-        )
+        captured_output2 = export(f2, f2.get_random_inputs(), strict=True)
 
         extra_modules = {
-            "new_module": captured_output2.exported_program,
+            "new_module": captured_output2,
         }
 
         # Add extra export modules
@@ -640,7 +638,7 @@ def test_add_extra_export_modules(self):
         )
         self.check_graph_closeness(
             etrecord.graph_map["new_module/forward"],
-            captured_output2.exported_program.graph_module,
+            captured_output2.graph_module,
         )
 
     def test_add_extra_export_modules_reserved_name_validation(self):
@@ -1066,13 +1064,11 @@ def test_add_exported_program_already_exists_exception(self):
 
         # Create another exported program to try to add
         f2 = models.BasicSinMax()
-        captured_output2 = exir.capture(
-            f2, f2.get_random_inputs(), exir.CaptureConfig()
-        )
+        captured_output2 = export(f2, f2.get_random_inputs(), strict=True)
 
         # Verify that adding exported program raises RuntimeError
         with self.assertRaises(RuntimeError) as context:
-            etrecord.add_exported_program(captured_output2.exported_program)
+            etrecord.add_exported_program(captured_output2)
 
         self.assertIn(
             "Exported program already exists in the ETRecord",
@@ -1202,11 +1198,11 @@ def test_add_edge_dialect_program_already_exists_exception(self):
 
         # Create another edge program to try to add
         f2 = models.BasicSinMax()
-        captured_output2 = exir.capture(
-            f2, f2.get_random_inputs(), exir.CaptureConfig()
-        )
-        edge_output2 = captured_output2.to_edge(
-            exir.EdgeCompileConfig(_check_ir_validity=False, _use_edge_ops=False)
+        edge_output2 = to_edge(
+            export(f2, f2.get_random_inputs(), strict=True),
+            compile_config=exir.EdgeCompileConfig(
+                _check_ir_validity=False, _use_edge_ops=False
+            ),
         )
 
         # Verify that adding edge dialect program raises RuntimeError
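The hunks above also show the return-type difference driving this migration: exir.capture() returned a wrapper whose .exported_program attribute held the graph, while torch.export.export() returns the ExportedProgram itself, so the tests hand it to the ETRecord APIs directly and lower it with the free to_edge() function. A condensed sketch of the new shape of these tests, assuming the models helper and an etrecord instance from the surrounding file:

from executorch import exir
from executorch.exir import to_edge
from torch.export import export

f2 = models.BasicSinMax()

# export() already yields an ExportedProgram; no .exported_program hop needed.
captured_output2 = export(f2, f2.get_random_inputs(), strict=True)
etrecord.add_exported_program(captured_output2)

# Lowering goes through to_edge() instead of a method on the capture wrapper.
edge_output2 = to_edge(
    export(f2, f2.get_random_inputs(), strict=True),
    compile_config=exir.EdgeCompileConfig(
        _check_ir_validity=False, _use_edge_ops=False
    ),
)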

devtools/size_analysis_tool/size_analysis_tool_test.py

Lines changed: 8 additions & 2 deletions
@@ -11,6 +11,7 @@
     XnnpackFloatingPointPartitioner,
 )
 from executorch.backends.xnnpack.utils.configs import (
+    get_transform_passes,
     get_xnnpack_edge_compile_config,
     get_xnnpack_executorch_backend_config,
 )
@@ -19,6 +20,7 @@
     generate_model_size_information,
 )
 from executorch.exir import to_edge
+from executorch.exir.backend.backend_api import to_backend, validation_disabled
 from executorch.exir.passes.spec_prop_pass import SpecPropPass
 from torch.export import export
 
@@ -56,10 +58,14 @@ def forward(self, x):
         edge_program = to_edge(
             export(mm, (test_input,), strict=True),
             compile_config=get_xnnpack_edge_compile_config(),
-        )
+        ).transform(get_transform_passes())
         partitioner = XnnpackFloatingPointPartitioner()
 
-        delegated_program = edge_program.to_backend(partitioner)
+        with validation_disabled():
+            delegated_program = edge_program
+            delegated_program._edge_programs["forward"] = to_backend(
+                edge_program.exported_program(), partitioner
+            )
 
         program = delegated_program.to_executorch(
             get_xnnpack_executorch_backend_config([SpecPropPass()]),
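Note the delegation change in the last hunk: rather than calling to_backend(partitioner) on the EdgeProgramManager, the test lowers the underlying ExportedProgram with the free to_backend function inside validation_disabled(), then writes the result back into the manager's private _edge_programs map, presumably to bypass edge-dialect validation this graph would otherwise fail. Condensed, with edge_program and the other names taken from the diff, the pattern is:

from executorch.exir.backend.backend_api import to_backend, validation_disabled

partitioner = XnnpackFloatingPointPartitioner()

with validation_disabled():
    # Delegate the "forward" method's ExportedProgram directly and
    # swap it back into the manager's (private) program map.
    edge_program._edge_programs["forward"] = to_backend(
        edge_program.exported_program(), partitioner
    )

program = edge_program.to_executorch(
    get_xnnpack_executorch_backend_config([SpecPropPass()]),
)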

test/end2end/exported_module.py

Lines changed: 0 additions & 3 deletions
@@ -67,7 +67,6 @@ def export(
         methods: Sequence[str] = ("forward",),
         ignore_to_out_var_failure: bool = False,
         dynamic_memory_planning_mode: DynamicMemoryPlanningMode = DynamicMemoryPlanningMode.UPPER_BOUND,
-        capture_config=None,
         export_joint_graph: bool = False,
         external_constants: bool = False,
         export_state_names: bool = False,
@@ -146,8 +145,6 @@ def return_wrapper():
 
         method_name_to_dynamic_shapes = None
         if hasattr(eager_module, "get_dynamic_shapes"):
-            assert capture_config is not None
-            assert capture_config.enable_aot is True
             trace_dynamic_shapes = eager_module.get_dynamic_shapes()  # type: ignore[operator]
             method_name_to_dynamic_shapes = {}
             for method in methods:
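With the capture_config parameter and its enable_aot assertions removed, the shapes returned by a module's get_dynamic_shapes() hook feed straight into torch.export.export via its dynamic_shapes argument. A standalone sketch of that mechanism (the module and dimension names are illustrative):

import torch
from torch.export import Dim, export


class Model(torch.nn.Module):
    def forward(self, x):
        return x * 2


# A spec marking dim 0 of x as dynamic, analogous to what a module's
# get_dynamic_shapes() hook would return for its forward method.
dynamic_shapes = {"x": {0: Dim("batch", min=1, max=16)}}

ep = export(Model(), (torch.randn(4, 3),), dynamic_shapes=dynamic_shapes, strict=True)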

test/end2end/test_end2end.py

Lines changed: 1 addition & 28 deletions
@@ -21,12 +21,7 @@
 import executorch.extension.pytree as pytree
 import torch
 
-from executorch.exir import (
-    CaptureConfig,
-    EdgeCompileConfig,
-    ExecutorchBackendConfig,
-    memory,
-)
+from executorch.exir import EdgeCompileConfig, ExecutorchBackendConfig, memory
 from executorch.exir.dynamic_shape import DynamicMemoryPlanningMode
 from executorch.exir.emit import emit_program
 from executorch.exir.pass_manager import PassManager
@@ -471,7 +466,6 @@ def maketest(
     allow_non_contiguous_tensor: bool = False,
     method: str = "forward",
     dynamic_memory_planning_mode: DynamicMemoryPlanningMode = DynamicMemoryPlanningMode.UPPER_BOUND,
-    capture_config=None,
     verify_graph: Optional[Callable] = None,
 ) -> Callable[[unittest.TestCase], None]:
     r"""Returns a TestCase method to test the provided module class and method.
@@ -507,7 +501,6 @@ def wrapper(self: unittest.TestCase) -> None:
             methods=(method,),
             ignore_to_out_var_failure=ignore_to_out_var_failure,
             dynamic_memory_planning_mode=dynamic_memory_planning_mode,
-            capture_config=capture_config,
         )
         if verify_graph:
             verify_graph(self, module.exported_program.graph_module)
@@ -599,9 +592,6 @@ def test_ops_return_multi(self):
     def test_mem_planning_toy_model(self):
         maketest(
             ToyModelForMemPlanning,
-            capture_config=exir.CaptureConfig(
-                enable_dynamic_shape=True,
-            ),
         )(self)
 
     # TODO: add ops implementations and turn on 'run_executor'
@@ -621,9 +611,6 @@ def test_containers(self):
         maketest(
             ModuleContainers,
             do_tree_flatten=True,
-            capture_config=exir.CaptureConfig(
-                enable_dynamic_shape=True,
-            ),
         )(self)
 
     # can not run the graph module since the out variance with tensor list out
@@ -675,9 +662,6 @@ def test_intermediate_dynamic_shape(self):
             ModuleIntermediateDynamicShape,
             run_graph_module=False,
             allow_non_contiguous_tensor=True,
-            capture_config=exir.CaptureConfig(
-                enable_dynamic_shape=True,
-            ),
         )(self)
 
     # TODO(shunting): some non constant tensors for transformer are non-contiguous.
@@ -697,10 +681,6 @@ def test_transformer_encode(self):
     def test_ft_cond_basic(self):
         maketest(
             FTCondBasic,
-            capture_config=exir.CaptureConfig(
-                enable_dynamic_shape=True,
-                enable_functionalization=False,  # TODO enable functionalization
-            ),
         )(self)
 
     def test_ft_map_basic(self):
@@ -746,10 +726,6 @@ def test_ft_map_basic(self):
     def test_ft_cond_dynshape(self):
         maketest(
             FTCondDynShape,
-            capture_config=exir.CaptureConfig(
-                enable_dynamic_shape=True,
-                enable_functionalization=False,  # TODO enable functionalization
-            ),
         )(self)
 
     def test_ft_map_dynshape(self):
@@ -802,9 +778,6 @@ def test_ft_map_dynshape(self):
     def test_batch_norm(self):
         maketest(
             BatchNormModel,
-            capture_config=exir.CaptureConfig(
-                enable_dynamic_shape=True,
-            ),
             verify_graph=BatchNormModel.verify_graph,
             # TODO: lean mode does not have native_batch_norm.out implemented
             # run this on aten mode.
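After this change maketest() no longer threads a capture_config through to ExportedModule.export, so every call site above simply drops the argument. One representative test, before and after:

# Before: each test spelled out a CaptureConfig.
#   maketest(
#       ToyModelForMemPlanning,
#       capture_config=exir.CaptureConfig(enable_dynamic_shape=True),
#   )(self)

# After: the knob is gone; when a module defines get_dynamic_shapes(),
# the shapes are resolved inside ExportedModule.export instead.
maketest(
    ToyModelForMemPlanning,
)(self)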

test/models/export_program.py

Lines changed: 1 addition & 2 deletions
@@ -13,7 +13,6 @@
 from typing import Any, Dict, List, Type
 
 import torch
-from executorch.exir import CaptureConfig
 from executorch.exir.passes import MemoryPlanningPass
 from executorch.exir.program._program import ExecutorchProgramManager
 from torch import nn
@@ -140,7 +139,7 @@ def get_memory_planning_pass(self):
 
     @staticmethod
     def get_export_kwargs():
-        return {"capture_config": CaptureConfig(pt2_mode=True, enable_aot=True)}
+        return {}
 
 
 class ModuleAddMul(torch.nn.Module):
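The final hunk leaves get_export_kwargs() returning an empty dict rather than deleting the hook: with torch.export as the only capture path there is no capture_config left to forward, but keeping the staticmethod means call sites that splat these kwargs into the export harness (the apparent usage, judging from the end2end helpers above) need no change:

@staticmethod
def get_export_kwargs():
    # Nothing to forward anymore; the empty dict keeps
    # export(**cls.get_export_kwargs()) call sites working unchanged.
    return {}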
