diff --git a/backends/arm/test/misc/test_const_shape.py b/backends/arm/test/misc/test_const_shape.py
index 5e3d1233b73..c971682911b 100644
--- a/backends/arm/test/misc/test_const_shape.py
+++ b/backends/arm/test/misc/test_const_shape.py
@@ -35,7 +35,7 @@ def call_operator(self, op, args, kwargs, meta, updated: bool | None = False):
         return super().call_operator(op, args, kwargs, meta, updated)


-def test_const_shape_injects_meta_no_target():
+def test_const_shape_injects_meta():
     class M(torch.nn.Module):
         def forward(self, x):
             return x + 1
diff --git a/backends/arm/test/misc/test_count_tosa_ops.py b/backends/arm/test/misc/test_count_tosa_ops.py
index 2f13f1f05f6..127a4623b36 100644
--- a/backends/arm/test/misc/test_count_tosa_ops.py
+++ b/backends/arm/test/misc/test_count_tosa_ops.py
@@ -14,7 +14,7 @@ def forward(self, x, y):
         return x + y


-def test_count_tosa_ops_add_no_target():
+def test_count_tosa_ops_add():
     model = AddModule()
     test_data = (torch.randn(1, 8, 8, 8), torch.randn(1, 8, 8, 8))
     pipeline = TosaPipelineFP[type(test_data)](
@@ -27,7 +27,7 @@ def test_count_tosa_ops_add_no_target():
     pipeline.run()


-def test_count_tosa_ops_2_adds_no_target():
+def test_count_tosa_ops_2_adds():
     model = AddModule()
     test_data = (torch.randn(1, 8, 8, 8), torch.randn(1, 8, 8, 8))
     pipeline = TosaPipelineFP[type(test_data)](
diff --git a/backends/arm/test/misc/test_debug_hook.py b/backends/arm/test/misc/test_debug_hook.py
index 2dfe1307718..92227f7c3cc 100644
--- a/backends/arm/test/misc/test_debug_hook.py
+++ b/backends/arm/test/misc/test_debug_hook.py
@@ -1,4 +1,4 @@
-# Copyright 2025 Arm Limited and/or its affiliates.
+# Copyright 2025-2026 Arm Limited and/or its affiliates.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -160,7 +160,7 @@ def _compare_node_and_schema(debug_event: DebugSchema, mocked_node):


 @common.parametrize("test_data", TESTCASES)
-def test_debug_hook_add_json_no_target(test_data: DebugHookTestCase):
+def test_debug_hook_add_json(test_data: DebugHookTestCase):
     hook = DebugHook(ArmCompileSpec.DebugMode.JSON)
     hook.add(cast(Node, test_data.mock_node), test_data.tosa_op, test_data.op_id)
@@ -173,7 +173,7 @@ def test_debug_hook_add_json_no_target(test_data: DebugHookTestCase):


 @common.parametrize("test_data", TESTCASES)
-def test_debug_hook_add_tosa_no_target(test_data: DebugHookTestCase):
+def test_debug_hook_add_tosa(test_data: DebugHookTestCase):
     hook = DebugHook(ArmCompileSpec.DebugMode.TOSA)
     hook.add(cast(Node, test_data.mock_node), test_data.tosa_op, test_data.op_id)
diff --git a/backends/arm/test/misc/test_mixed_fp_bf16_partition.py b/backends/arm/test/misc/test_mixed_fp_bf16_partition.py
index 7385cd4863c..7851849a7a3 100644
--- a/backends/arm/test/misc/test_mixed_fp_bf16_partition.py
+++ b/backends/arm/test/misc/test_mixed_fp_bf16_partition.py
@@ -50,7 +50,7 @@ def forward(
         return self.conv_bf16(x), self.conv_fp32(y)


-def test_mixed_fp32_bf16_inputs_rejected_no_target():
+def test_mixed_fp32_bf16_inputs_rejected():
     test_data = (torch.randn(1, 3, 8, 8, dtype=torch.float32),)
     exported_program = to_edge(
         export(MixedConv(torch.bfloat16), test_data, strict=True),
@@ -68,7 +68,7 @@ def test_mixed_fp32_bf16_inputs_rejected_no_target():
     assert "Mixed floating-point input" in reporter.get_table_report()


-def test_mixed_bf16_cast_fp32_inputs_accepted_no_target():
+def test_mixed_bf16_cast_fp32_inputs_accepted():
     test_data = (torch.randn(1, 3, 8, 8, dtype=torch.float32),)
     exported_program = to_edge(
         export(CastInputConv(torch.bfloat16), test_data, strict=True)
@@ -84,7 +84,7 @@ def test_mixed_bf16_cast_fp32_inputs_accepted_no_target():
     assert support.is_node_supported(exported_program.graph_module, conv_node) is True


-def test_bf16_rejected_without_tosa_support_no_target():
+def test_bf16_rejected_without_tosa_support():
     test_data = (torch.randn(1, 3, 8, 8, dtype=torch.bfloat16),)
     exported_program = to_edge(
         export(MixedConv(torch.bfloat16), test_data, strict=True)
@@ -101,7 +101,7 @@ def test_bf16_rejected_without_tosa_support_no_target():
     assert "Had torch.bfloat16 input" in reporter.get_table_report()


-def test_parallel_bf16_fp32_inputs_accepted_no_target():
+def test_parallel_bf16_fp32_inputs_accepted():
     test_data = (
         torch.randn(1, 3, 8, 8, dtype=torch.bfloat16),
         torch.randn(1, 3, 8, 8, dtype=torch.float32),
diff --git a/backends/arm/test/misc/test_model_evaluator.py b/backends/arm/test/misc/test_model_evaluator.py
index b5b1cb3b118..36c98670e0c 100644
--- a/backends/arm/test/misc/test_model_evaluator.py
+++ b/backends/arm/test/misc/test_model_evaluator.py
@@ -27,7 +27,7 @@ def mocked_model_2(input: torch.Tensor) -> torch.Tensor:
 class TestModelEvaluator(unittest.TestCase):
     """Tests the Arm model evaluators."""

-    def test_get_model_error_no_target(self):
+    def test_get_model_error(self):
         example_input = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
         evaluator = NumericalModelEvaluator(
             "dummy_model",
@@ -44,7 +44,7 @@ def test_get_model_error_no_target(self):
         self.assertEqual(metrics["max_percentage_error"], 25.0)
         self.assertEqual(metrics["mean_absolute_error"], 0.25)

-    def test_get_compression_ratio_no_target(self):
+    def test_get_compression_ratio(self):
         with tempfile.NamedTemporaryFile(delete=True) as temp_bin:
             torch.save(COMPRESSION_RATIO_TEST, temp_bin)
diff --git a/backends/arm/test/misc/test_pass_pipeline_config.py b/backends/arm/test/misc/test_pass_pipeline_config.py
index d017d27ebc4..84575fb04fa 100644
--- a/backends/arm/test/misc/test_pass_pipeline_config.py
+++ b/backends/arm/test/misc/test_pass_pipeline_config.py
@@ -17,7 +17,7 @@
 from executorch.backends.arm.tosa.specification import TosaSpecification


-def test_pipeline_config_override_outside_compile_spec_no_target():
+def test_pipeline_config_override_outside_compile_spec():
     compile_spec = TosaCompileSpec(
         TosaSpecification.create_from_string("TOSA-1.00+INT")
     )
diff --git a/backends/arm/test/misc/test_post_quant_device_switch.py b/backends/arm/test/misc/test_post_quant_device_switch.py
index 064a6d16d29..8d4ff95dbc8 100644
--- a/backends/arm/test/misc/test_post_quant_device_switch.py
+++ b/backends/arm/test/misc/test_post_quant_device_switch.py
@@ -196,7 +196,7 @@ def _to_meta_inputs(


 @pytest.mark.parametrize("case", _TEST_CASES, ids=[case.name for case in _TEST_CASES])
-def test_post_quant_device_switch_no_target(case: MetaRetraceCase) -> None:
+def test_post_quant_device_switch(case: MetaRetraceCase) -> None:
     """Test that moving a model to another device after
     quantization works.
     """
diff --git a/backends/arm/test/misc/test_public_api_manifest.py b/backends/arm/test/misc/test_public_api_manifest.py
index b7711c361e7..e891584364b 100644
--- a/backends/arm/test/misc/test_public_api_manifest.py
+++ b/backends/arm/test/misc/test_public_api_manifest.py
@@ -30,7 +30,7 @@ def _entry_block(path: str, entry: dict[str, str]) -> str:
     )


-def test_public_api_manifest_entries_are_well_formed_no_target():
+def test_public_api_manifest_entries_are_well_formed():
     entries = _collect_public_api()

     assert entries
@@ -46,7 +46,7 @@ def test_public_api_manifest_entries_are_well_formed_no_target():
         assert path.rsplit(".", 1)[0] in entries


-def test_public_api_manifest_matches_generator_no_target():
+def test_public_api_manifest_matches_generator():
     entries = _collect_public_api()
     manifest = _render_manifest(entries)
@@ -60,7 +60,7 @@ def test_public_api_manifest_matches_generator_no_target():
     assert manifest == Path(RUNNING_MANIFEST_PATH).read_text(encoding="utf-8")


-def test_public_api_manifest_collection_handles_deprecated_symbols_no_target():
+def test_public_api_manifest_collection_handles_deprecated_symbols():
     @deprecated("old foo")
     def old_foo(x: int) -> int:
         return x
@@ -73,7 +73,7 @@ def old_foo(x: int) -> int:
     assert "old_foo" not in entries


-def test_public_api_manifest_collection_excludes_init_for_equivalent_classes_no_target():
+def test_public_api_manifest_collection_excludes_init_for_equivalent_classes():
     class ExplicitInit:
         def __init__(self, x: int = 0) -> None:
             del x
diff --git a/backends/arm/test/misc/test_runner_utils.py b/backends/arm/test/misc/test_runner_utils.py
index 240013594bc..10a8b6df3a6 100644
--- a/backends/arm/test/misc/test_runner_utils.py
+++ b/backends/arm/test/misc/test_runner_utils.py
@@ -17,7 +17,7 @@ def exported_program(self):
         return object()


-def test_run_corstone_no_target_uses_short_input_aliases_in_semihosting_cmd(
+def test_run_corstone_uses_short_input_aliases_in_semihosting_cmd(
     monkeypatch, tmp_path: Path
 ) -> None:
     long_input_paths = [
diff --git a/backends/arm/test/misc/test_shared_qspecs.py b/backends/arm/test/misc/test_shared_qspecs.py
index 343ce99edde..de07bd5f6c2 100644
--- a/backends/arm/test/misc/test_shared_qspecs.py
+++ b/backends/arm/test/misc/test_shared_qspecs.py
@@ -622,7 +622,7 @@ def forward(self, x):


 @parametrize("test_case", test_cases)
-def test_shared_qspec_quantizer_no_target(test_case):
+def test_shared_qspec_quantizer(test_case):
     """Test that ops that do not change dynamic range are able to use
     int8 portable kernels.
     """
@@ -655,7 +655,7 @@ def test_shared_qspec_quantizer_no_target(test_case):


 @parametrize("test_case", float_test_cases)
-def test_shared_qspec_quantizer_no_qspecs_no_target(test_case):
+def test_shared_qspec_quantizer_no_qspecs(test_case):
     """Test that ops that do not change dynamic range are able to use
     int8 portable kernels.
     """
@@ -671,7 +671,7 @@ def test_shared_qspec_quantizer_no_qspecs_no_target(test_case):
     _check_quant_params(pipeline, test_case.model.quant_params)


-def test_maximum_mixed_int8_int16_inputs_no_target():
+def test_maximum_mixed_int8_int16_inputs():
     model = MixedMaximumInt8Int16()
     inputs = (ramp_tensor(-2, 2, (2, 3, 4)),)
diff --git a/backends/arm/test/misc/test_tosa_dialect_shape_ops.py b/backends/arm/test/misc/test_tosa_dialect_shape_ops.py
index a5de5de648d..c4acdd98bf0 100644
--- a/backends/arm/test/misc/test_tosa_dialect_shape_ops.py
+++ b/backends/arm/test/misc/test_tosa_dialect_shape_ops.py
@@ -41,7 +41,7 @@ def _expr_equals(sym: torch.SymInt, expected: sympy.Expr) -> bool:

 # Test that DIM can extract a symbolic dimension from a tensor when the TOSA specification supports the shape extension.
-def test_dim_extracts_symbolic_dimension_no_target():
+def test_dim_extracts_symbolic_dimension():
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=4)
@@ -59,7 +59,7 @@ def test_dim_extracts_symbolic_dimension_no_target():

 # Test that DIM raises an error when the TOSA specification doesn't support the shape extension, as DIM relies on shape
 # expressions to return symbolic dimensions.
-def test_dim_requires_shape_extension_no_target():
+def test_dim_requires_shape_extension():
     spec_no_shape = TosaSpecification.create_from_string("TOSA-1.0+FP")
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=3)
@@ -74,7 +74,7 @@ def test_dim_requires_shape_extension_no_target():

 # Test that CONST_SHAPE creates a constant shape tensor and returns the expected shape list.
-def test_const_shape_no_target():
+def test_const_shape():
     with TosaLoweringContext(
         TosaSpecification.create_from_string("TOSA-1.1+FP+shape")
     ), FakeTensorMode():
@@ -83,7 +83,7 @@ def test_const_shape_no_target():

 # Test that CONCAT_SHAPE with constant shapes performs concatenation and returns a constant shape.
-def test_concat_const_shapes_no_target():
+def test_concat_const_shapes():
     with TosaLoweringContext(
         TosaSpecification.create_from_string("TOSA-1.1+FP+shape")
     ), FakeTensorMode():
@@ -96,7 +96,7 @@ def test_concat_const_shapes_no_target():

 # Test that CONCAT_SHAPE with symbolic shapes produces a symbolic expression concatenating the dimensions.
-def test_concat_symbolic_shape_no_target():
+def test_concat_symbolic_shape():
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=2)
     s1 = _make_symint(shape_env, "s1", hint=3)
@@ -116,7 +116,7 @@ def test_concat_symbolic_shape_no_target():
     assert _expr(result[1]) == "s1"


-def test_concat_mixed_shape_no_target():
+def test_concat_mixed_shape():
     shape_env = ShapeEnv()

     s0 = _make_symint(shape_env, "s0", hint=2)
@@ -136,7 +136,7 @@ def test_concat_mixed_shape_no_target():

 # Test that CONCAT_SHAPE raises an error when given fewer than 2 shape tensors, as it requires at least 2 to
 # concatenate.
-def test_concat_shape_requires_arguments_no_target():
+def test_concat_shape_requires_arguments():
     with pytest.raises(
         TosaValueError, match="CONCAT_SHAPE expected 2 or more shape tensors"
     ):
@@ -147,7 +147,7 @@ def test_concat_shape_requires_arguments_no_target():

 # Test ADD_SHAPE with constant values, which should perform elementwise addition and return a constant shape.
-def test_add_const_shape_no_target():
+def test_add_const_shape():
     shape_env = ShapeEnv()
     with TosaLoweringContext(
         TosaSpecification.create_from_string("TOSA-1.1+FP+shape"), shape_env
@@ -160,7 +160,7 @@ def test_add_const_shape_no_target():

 # Test ADD_SHAPE with symbolic values, which should produce a symbolic expression adding the two dimensions.
-def test_add_symbolic_shape_no_target():
+def test_add_symbolic_shape():
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=2)
     s1 = _make_symint(shape_env, "s1", hint=3)
@@ -179,7 +179,7 @@ def test_add_symbolic_shape_no_target():
     assert _expr_equals(result[0], sympy.Symbol("s0") + sympy.Symbol("s1"))


-def test_add_mixed_shape_no_target():
+def test_add_mixed_shape():
     shape_env = ShapeEnv()

     s0 = _make_symint(shape_env, "s0", hint=2)
@@ -197,7 +197,7 @@ def test_add_mixed_shape_no_target():

 # Test SUB_SHAPE with constant values, which should perform subtraction and return a constant shape.
-def test_sub_const_shape_no_target():
+def test_sub_const_shape():
     shape_env = ShapeEnv()
     with TosaLoweringContext(
         TosaSpecification.create_from_string("TOSA-1.1+FP+shape"), shape_env
@@ -210,7 +210,7 @@ def test_sub_const_shape_no_target():

 # Test SUB_SHAPE with symbolic values, which should produce a Sub expression.
-def test_sub_symbolic_shape_no_target():
+def test_sub_symbolic_shape():
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=2)
     s1 = _make_symint(shape_env, "s1", hint=3)
@@ -230,7 +230,7 @@ def test_sub_symbolic_shape_no_target():
     assert _expr_equals(result[0], sympy.Symbol("s0") - sympy.Symbol("s1"))


-def test_sub_mixed_shape_no_target():
+def test_sub_mixed_shape():
     shape_env = ShapeEnv()

     s0 = _make_symint(shape_env, "s0", hint=3)
@@ -249,7 +249,7 @@ def test_sub_mixed_shape_no_target():

 # Test MUL_SHAPE with constant values, which should perform multiplication and return a constant shape.
-def test_mul_const_shape_no_target():
+def test_mul_const_shape():
     shape_env = ShapeEnv()
     with TosaLoweringContext(
         TosaSpecification.create_from_string("TOSA-1.1+FP+shape"), shape_env
@@ -262,7 +262,7 @@ def test_mul_const_shape_no_target():

 # Test MUL_SHAPE with symbolic values, which should produce a Mul expression.
-def test_mul_symbolic_shape_no_target():
+def test_mul_symbolic_shape():
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=2)
     s1 = _make_symint(shape_env, "s1", hint=3)
@@ -281,7 +281,7 @@ def test_mul_symbolic_shape_no_target():
     assert _expr_equals(result[0], sympy.Symbol("s0") * sympy.Symbol("s1"))


-def test_mul_mixed_shape_no_target():
+def test_mul_mixed_shape():
     shape_env = ShapeEnv()

     s0 = _make_symint(shape_env, "s0", hint=4)
@@ -299,7 +299,7 @@ def test_mul_mixed_shape_no_target():

 # Test DIV_FLOOR_SHAPE with constant values, which should perform floor division and return a constant shape.
-def test_div_floor_const_shape_no_target():
+def test_div_floor_const_shape():
     shape_env = ShapeEnv()
     with TosaLoweringContext(
         TosaSpecification.create_from_string("TOSA-1.1+FP+shape"), shape_env
@@ -312,7 +312,7 @@ def test_div_floor_const_shape_no_target():

 # Test DIV_FLOOR_SHAPE with symbolic values, which should produce a FloorDiv expression.
-def test_div_floor_symbolic_shape_no_target():
+def test_div_floor_symbolic_shape():
     shape_env = ShapeEnv()
     s0 = _make_symint(shape_env, "s0", hint=8)
     s1 = _make_symint(shape_env, "s1", hint=3)
@@ -330,7 +330,7 @@ def test_div_floor_symbolic_shape_no_target():
     assert _expr_equals(result[0], sympy.sympify("(s0//s1)"))


-def test_div_floor_mixed_shape_no_target():
+def test_div_floor_mixed_shape():
     shape_env = ShapeEnv()

     s0 = _make_symint(shape_env, "s0", hint=4)
diff --git a/backends/arm/test/misc/test_tosa_spec.py b/backends/arm/test/misc/test_tosa_spec.py
index 7170c7ed4d1..b664d7d63f2 100644
--- a/backends/arm/test/misc/test_tosa_spec.py
+++ b/backends/arm/test/misc/test_tosa_spec.py
@@ -74,7 +74,7 @@ class TestTosaSpecification(unittest.TestCase):
     """Tests the TOSA specification class."""

     @parameterized.expand(test_valid_strings)  # type: ignore[misc]
-    def test_version_string_no_target(self, version_string: str, expected_type):
+    def test_version_string(self, version_string: str, expected_type):
         tosa_spec = TosaSpecification.create_from_string(version_string)
         assert isinstance(tosa_spec, expected_type)
         assert [profile in ["INT", "FP"] for profile in tosa_spec.profiles].count(
@@ -90,7 +90,7 @@ def test_version_string_no_target(self, version_string: str, expected_type):
         assert set(tosa_spec.extensions).issubset(allowed_extensions)

     @parameterized.expand(test_invalid_strings)  # type: ignore[misc]
-    def test_invalid_version_strings_no_target(self, version_string: str):
+    def test_invalid_version_strings(self, version_string: str):
         tosa_spec = None
         with self.assertRaises(ValueError):
             tosa_spec = TosaSpecification.create_from_string(version_string)
@@ -98,14 +98,12 @@ def test_invalid_version_strings_no_target(self, version_string: str):
         assert tosa_spec is None

     @parameterized.expand(test_valid_strings)
-    def test_correct_string_representation_no_target(
-        self, version_string: str, expected_type
-    ):
+    def test_correct_string_representation(self, version_string: str, expected_type):
         tosa_spec = TosaSpecification.create_from_string(version_string)
         assert isinstance(tosa_spec, expected_type)
         assert f"{tosa_spec}" == version_string

-    def test_supports_new_1_1_extensions_no_target(self):
+    def test_supports_new_1_1_extensions(self):
         fp_spec = TosaSpecification.create_from_string(
             "TOSA-1.1+FP+shape+int64+random+mxfp+blockscale_ue5m3"
         )
@@ -127,7 +125,7 @@ def test_supports_new_1_1_extensions_no_target(self):
 class TestTosaSpecMapping(unittest.TestCase):
     """Tests the TosaSpecMapping class."""

-    def test_mapping_no_target(self):
+    def test_mapping(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "A")
         # check that the mapping is correct
@@ -136,7 +134,7 @@ def test_mapping_no_target(self):
         assert vals == ["A"]
         assert len(vals) == 1

-    def test_mapping_multiple_no_target(self):
+    def test_mapping_multiple(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "A")
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "B")
@@ -146,7 +144,7 @@ def test_mapping_multiple_no_target(self):
         assert vals == ["A", "B"]
         assert len(vals) == 2

-    def test_mapping_different_profiles_no_target(self):
+    def test_mapping_different_profiles(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "A")
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+FP"), "B")
@@ -159,7 +157,7 @@ def test_mapping_different_profiles_no_target(self):
         assert len(vals_int) == 1
         assert len(vals_fp) == 1

-    def test_mapping_1_1_profiles_no_target(self):
+    def test_mapping_1_1_profiles(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.1+INT"), "A")
         vals = mapping.get(TosaSpecification.create_from_string("TOSA-1.1+INT"))
@@ -167,7 +165,7 @@ def test_mapping_1_1_profiles_no_target(self):
         assert vals == ["A"]
         assert len(vals) == 1

-    def test_mapping_different_profiles_combined_consumer_no_target(self):
+    def test_mapping_different_profiles_combined_consumer(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "A")
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+FP"), "B")
@@ -180,18 +178,18 @@ def test_mapping_different_profiles_combined_consumer_no_target(self):
         assert "B" in combined_vals
         assert len(combined_vals) == 2

-    def test_mapping_no_spec_no_target(self):
+    def test_mapping_no_spec(self):
         mapping = TosaSpecMapping()
         with self.assertRaises(KeyError):
             mapping.get(TosaSpecification.create_from_string("TOSA-1.0+INT"))

-    def test_mapping_no_values_for_spec_no_target(self):
+    def test_mapping_no_values_for_spec(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+FP"), "A")
         with self.assertRaises(KeyError):
             mapping.get(TosaSpecification.create_from_string("TOSA-1.0+INT"))

-    def test_spec_with_different_profiles_no_target(self):
+    def test_spec_with_different_profiles(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+FP"), "A")
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "B")
@@ -208,20 +206,20 @@ def test_spec_with_different_profiles_no_target(self):
         assert len(vals_fp) == 1
         assert len(vals_int_fp) == 2

-    def test_combined_profiles_no_target(self):
+    def test_combined_profiles(self):
         mapping = TosaSpecMapping()
         with self.assertRaises(ValueError):
             # Don't allow multiple profiles in a single spec
             mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT+FP"), "A")

-    def test_spec_add_with_extension_no_target(self):
+    def test_spec_add_with_extension(self):
         mapping = TosaSpecMapping()
         with self.assertRaises(ValueError):
             mapping.add(
                 TosaSpecification.create_from_string("TOSA-1.0.0+INT+int16"), "A"
             )

-    def test_spec_non_canonical_key_no_target(self):
+    def test_spec_non_canonical_key(self):
         mapping = TosaSpecMapping()
         mapping.add(TosaSpecification.create_from_string("TOSA-1.0+INT"), "A")
diff --git a/backends/arm/test/passes/test_decompose_einsum_pass.py b/backends/arm/test/passes/test_decompose_einsum_pass.py
index 78c91c7f92d..3e2f2b92d55 100644
--- a/backends/arm/test/passes/test_decompose_einsum_pass.py
+++ b/backends/arm/test/passes/test_decompose_einsum_pass.py
@@ -34,7 +34,7 @@ def _get_int8_quantizer() -> TOSAQuantizer:
     return quantizer


-def test_decompose_einsum_no_target_rewrites_export_graph() -> None:
+def test_decompose_einsum_rewrites_export_graph() -> None:
     module = EinsumPermuteModule().eval()
     exported_program = export(module, module.get_inputs())
diff --git a/backends/arm/test/passes/test_decompose_int16_activation_conv_pass.py b/backends/arm/test/passes/test_decompose_int16_activation_conv_pass.py
index dd3f742cf84..725e0ade760 100644
--- a/backends/arm/test/passes/test_decompose_int16_activation_conv_pass.py
+++ b/backends/arm/test/passes/test_decompose_int16_activation_conv_pass.py
@@ -106,7 +106,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return x


-def test_decompose_conv_with_int16_activation_no_target_fp32_no_decomposition() -> None:
+def test_decompose_conv_with_int16_activation_fp32_no_decomposition() -> None:
     """Test that DecomposeConvWithInt16ActivationPass does NOT decompose
     convolution when using FP32 (no quantization).
     """
diff --git a/backends/arm/test/passes/test_decompose_linear_pass.py b/backends/arm/test/passes/test_decompose_linear_pass.py
index 1bcd5d1a569..01f00c3e6b7 100644
--- a/backends/arm/test/passes/test_decompose_linear_pass.py
+++ b/backends/arm/test/passes/test_decompose_linear_pass.py
@@ -25,7 +25,7 @@ def get_example_inputs(self) -> Tuple[torch.Tensor]:
     return (torch.randn(1, 3, 16),)


-def test_decompose_linear_no_target_dynamic() -> None:
+def test_decompose_linear_dynamic() -> None:
     module = Linear()
     example_inputs = module.get_example_inputs()
     ep = export(
diff --git a/backends/arm/test/passes/test_fuse_consecutive_concat_shapes.py b/backends/arm/test/passes/test_fuse_consecutive_concat_shapes.py
index 544433c03ca..9cad9584684 100644
--- a/backends/arm/test/passes/test_fuse_consecutive_concat_shapes.py
+++ b/backends/arm/test/passes/test_fuse_consecutive_concat_shapes.py
@@ -83,7 +83,7 @@ def _run_fuse_pass(graph_module: GraphModule):
     return graph_module


-def test_fuse_consecutive_concat_shapes_no_target_flattens_nested_concat_inputs():
+def test_fuse_consecutive_concat_shapes_flattens_nested_concat_inputs():
     graph_module = _graph_module_with_nested_concat()
     graph_module = _run_fuse_pass(graph_module)
@@ -99,7 +99,7 @@ def test_fuse_consecutive_concat_shapes_no_target_flattens_nested_concat_inputs(
     )


-def test_fuse_consecutive_concat_shapes_no_target_leaves_flat_concat_unchanged():
+def test_fuse_consecutive_concat_shapes_leaves_flat_concat_unchanged():
     graph_module = _graph_module_with_flat_concat()
     graph_module = _run_fuse_pass(graph_module)
diff --git a/backends/arm/test/passes/test_insert_dynamic_padding_pass.py b/backends/arm/test/passes/test_insert_dynamic_padding_pass.py
index 01d2f7fd669..35b795ee2d9 100644
--- a/backends/arm/test/passes/test_insert_dynamic_padding_pass.py
+++ b/backends/arm/test/passes/test_insert_dynamic_padding_pass.py
@@ -26,7 +26,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return self.conv(x)


-def test_insert_dynamic_padding_no_target():
+def test_insert_dynamic_padding():
     model = ConvModule()
     example_inputs = (torch.randn(1, 3, 8, 8),)
     ep = export(
diff --git a/backends/arm/test/passes/test_replace_inf_values_pass.py b/backends/arm/test/passes/test_replace_inf_values_pass.py
index 0f4a069f3e3..8d6001c8df8 100644
--- a/backends/arm/test/passes/test_replace_inf_values_pass.py
+++ b/backends/arm/test/passes/test_replace_inf_values_pass.py
@@ -47,7 +47,7 @@ def _get_mask_buffer(graph_module: fx.GraphModule) -> torch.Tensor:
     return buffers["mask"]


-def test_replace_inf_and_limit_values_no_target_clamps_inf_constants():
+def test_replace_inf_and_limit_values_clamps_inf_constants():
     """Trace a module with infinities, run ReplaceInfAndLimitValuesPass, and
     expect the buffer and scalar literals to be clamped to ±255 with no
     infinities left.
@@ -64,7 +64,7 @@ def test_replace_inf_and_limit_values_no_target_clamps_inf_constants():
     assert sorted(_get_add_constants(result.graph_module)) == [-255, 255]


-def test_replace_inf_and_limit_values_no_target_respects_disallowed_nodes():
+def test_replace_inf_and_limit_values_respects_disallowed_nodes():
     """When nodes opt out of transforms, running the pass in TFA mode should
     leave the mask buffer untouched while still clamping scalar literals to
     ±255.
diff --git a/backends/arm/test/passes/test_rewrite_le_lt_to_ge_gt_pass.py b/backends/arm/test/passes/test_rewrite_le_lt_to_ge_gt_pass.py
index 781bc656be7..f3ed416fe0b 100644
--- a/backends/arm/test/passes/test_rewrite_le_lt_to_ge_gt_pass.py
+++ b/backends/arm/test/passes/test_rewrite_le_lt_to_ge_gt_pass.py
@@ -26,7 +26,7 @@ def forward(


 @common.parametrize("module", {"lt_le": LtLe()})
-def test_rewrite_le_lt_to_ge_gt_no_target(module: LtLe) -> None:
+def test_rewrite_le_lt_to_ge_gt(module: LtLe) -> None:
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
diff --git a/backends/arm/test/passes/test_size_adjust_input_pass.py b/backends/arm/test/passes/test_size_adjust_input_pass.py
index 71e218e6816..e733ebcd849 100644
--- a/backends/arm/test/passes/test_size_adjust_input_pass.py
+++ b/backends/arm/test/passes/test_size_adjust_input_pass.py
@@ -46,7 +46,7 @@ def _needs_truncation(input_length, kernel_size, stride, padding):
     return _greater_than((input_length + 2 * padding - kernel_size) % stride, padding)


-def test_size_adjust_input_no_target_static_conv2d():
+def test_size_adjust_input_static_conv2d():
     kernel_size, stride, padding = 3, 3, 1
     model = ConvModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 9, 9),)
@@ -77,7 +77,7 @@ def test_size_adjust_input_no_target_static_conv2d():
     ), "Input width should not need truncation after transformation"


-def test_size_adjust_input_no_target_static_conv_no_adjustment_needed():
+def test_size_adjust_input_static_conv_no_adjustment_needed():
     kernel_size, stride, padding = 3, 1, 1
     model = ConvModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 9, 9),)
@@ -115,7 +115,7 @@ def test_size_adjust_input_no_target_static_conv_no_adjustment_needed():
     ), "No slice nodes should be inserted when no adjustment is needed"


-def test_size_adjust_input_no_target_dynamic_conv2d():
+def test_size_adjust_input_dynamic_conv2d():
     kernel_size, stride, padding = 3, 3, 1
     model = ConvModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 14, 15),)
@@ -159,7 +159,7 @@ def test_size_adjust_input_no_target_dynamic_conv2d():
     ), "Two slice nodes should be inserted when adjustment is needed"


-def test_size_adjust_input_no_target_dynamic_conv_no_adjustment_needed():
+def test_size_adjust_input_dynamic_conv_no_adjustment_needed():
     kernel_size, stride, padding = 3, 1, 1
     model = ConvModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 9, 9),)
@@ -227,7 +227,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return self.pool(x)


-def test_size_adjust_input_no_target_static_pooling():
+def test_size_adjust_input_static_pooling():
     kernel_size, stride, padding = 3, 3, 1
     model = PoolingModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 9, 9),)
@@ -261,7 +261,7 @@ def test_size_adjust_input_no_target_static_pooling():
     ), "Input width should not need truncation after transformation"

-def test_size_adjust_input_no_target_static_pooling_no_adjustment_needed():
+def test_size_adjust_input_static_pooling_no_adjustment_needed():
     kernel_size, stride, padding = 3, 1, 1
     model = PoolingModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 9, 9),)
@@ -303,7 +303,7 @@ def test_size_adjust_input_no_target_static_pooling_no_adjustment_needed():
     ), "No slice nodes should be inserted when no adjustment is needed"


-def test_size_adjust_input_no_target_dynamic_pooling():
+def test_size_adjust_input_dynamic_pooling():
     kernel_size, stride, padding = 3, 3, 1
     model = PoolingModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 18, 18),)
@@ -355,7 +355,7 @@ def test_size_adjust_input_no_target_dynamic_pooling():
     ), "Two slice nodes should be inserted when adjustment is needed"


-def test_size_adjust_input_no_target_dynamic_pooling_no_adjustment_needed():
+def test_size_adjust_input_dynamic_pooling_no_adjustment_needed():
     kernel_size, stride, padding = 3, 1, 1
     model = PoolingModule(kernel_size=kernel_size, stride=stride, padding=padding)
     example_inputs = (torch.randn(1, 3, 18, 18),)
diff --git a/backends/arm/test/quantizer/test_partial_quantization.py b/backends/arm/test/quantizer/test_partial_quantization.py
index f8ba1d8d8d5..414d0c802fd 100644
--- a/backends/arm/test/quantizer/test_partial_quantization.py
+++ b/backends/arm/test/quantizer/test_partial_quantization.py
@@ -101,7 +101,7 @@ def _assert_disallow_flags(
     ), f"Node '{node_name}': expected DISALLOW_TFA_META_KEY={expected_flag}, got {actual_flag}"


-def test_disallow_tfa_for_skipped_module_no_target():
+def test_disallow_tfa_for_skipped_module():
     """Ensure a softmax skipped for quantization is not decomposed and that
     its node has `disallow_tfa` set.
     """
@@ -133,7 +133,7 @@ def example_inputs(self) -> tuple[torch.Tensor, ...]:
     )


-def test_disallow_tfa_for_two_skipped_modules_no_target():
+def test_disallow_tfa_for_two_skipped_modules():
     """Ensure a softmax and linear skipped for quantization are not decomposed
     and have their `disallow_tfa` set.
     """
@@ -156,7 +156,7 @@ def test_disallow_tfa_for_two_skipped_modules_no_target():
     )


-def test_disallow_tfa_with_global_none_and_one_quantized_module_no_target():
+def test_disallow_tfa_with_global_none_and_one_quantized_module():
     """Ensure that with a global None quantization config, only the linear module (with
     its own quantization config) is quantized, and that the other nodes have `disallow_tfa` set.
     """
@@ -180,7 +180,7 @@ def test_disallow_tfa_with_global_none_and_one_quantized_module_no_target():
     )


-def test_disallow_tfa_for_submodule_by_name_no_target():
+def test_disallow_tfa_for_submodule_by_name():
     """Ensure submodules can be skipped for quantization by name and have
     their nodes marked as disallowed for TFA.
     """
@@ -203,7 +203,7 @@ def test_disallow_tfa_for_submodule_by_name_no_target():
     )


-def test_disallow_tfa_name_config_contradicts_type_config_no_target():
+def test_disallow_tfa_name_config_contradicts_type_config():
     """Ensure that module name configs take precedence over module type
     configs when they contradict each other.
     """