|
19 | 19 | ToChannelFirstPreprocess, |
20 | 20 | ToChannelLastPreprocess, |
21 | 21 | ) |
| 22 | +from executorch.backends.nxp.tests.graph_verifier import BaseGraphVerifier |
| 23 | +from executorch.backends.nxp.tests.nsys_testing import lower_run_compare |
22 | 24 | from executorch.backends.nxp.tests.use_qat import * # noqa F403 |
23 | 25 |
|
24 | 26 | # noinspection PyProtectedMember |
@@ -47,7 +49,7 @@ def forward(self, x): |
47 | 49 |
|
48 | 50 |
|
49 | 51 | class MaxPool2dModule(torch.nn.Module): |
50 | | - def __init__(self, kernel_size=3, **kwargs): |
| 52 | + def __init__(self, kernel_size: int | tuple[int, ...] = 3, **kwargs): |
51 | 53 | super().__init__() |
52 | 54 | self.max_pool2d = torch.nn.MaxPool2d(kernel_size, **kwargs) |
53 | 55 |
|
@@ -250,3 +252,104 @@ def test_max_pool_2d__from_1d(self, mocker): |
250 | 252 | tflite_input_preprocess=ToChannelLastPreprocess(), |
251 | 253 | tflite_output_preprocess=ToChannelFirstPreprocess(), |
252 | 254 | ) |
| 255 | + |
| 256 | + |
class TestMaxPool2DNewNeutronFlow:
    """Delegation tests for `MaxPool2d` lowered through the new `neutron-c` Neutron flow.

    Each test builds a small `MaxPool2dModule`, lowers it to a quantized edge
    program, and checks whether the pooling operator was (or was not) delegated
    to the Neutron backend, exercising the backend's kernel-size / stride /
    padding limits around their boundary values.
    """

    # noinspection PyMethodMayBeStatic
    def assert_delegated(self, model, input_shape):
        """Lower `model` and verify the whole graph became a single delegate call."""
        graph_verifier = BaseGraphVerifier(
            exp_num_delegate_call_nodes=1,  # Delegated MaxPool.
            exp_non_delegated_nodes=[],
        )

        lower_run_compare(
            model, input_shape, graph_verifier, use_new_flow_neutron_c=True
        )

    # noinspection PyMethodMayBeStatic
    def assert_not_delegated(self, model, input_shape):
        """Lower `model` and verify `max_pool2d` stayed in the graph, un-delegated."""
        delegated_ep = to_quantized_edge_program(
            model, input_shape, use_new_flow_neutron_c=True
        ).exported_program()

        # Make sure the `max_pool2d` was NOT delegated.
        assert not graph_contains_any_of_ops(
            delegated_ep.graph, [ExecutorchDelegateCall]
        )
        assert graph_contains_any_of_ops(delegated_ep.graph, [MaxPool2D])

    def test__basic_nsys_inference(self):
        input_shape = (2, 4, 6, 7)  # The old flow limited the batch size to 1.
        model = MaxPool2dModule()
        self.assert_delegated(model, input_shape)

    def test__kernel_size_limit(self):
        kernel_size = (1, 4096)  # Exactly at the kernel size limit.
        input_shape = (1, 4) + kernel_size
        model = MaxPool2dModule(kernel_size)
        self.assert_delegated(model, input_shape)

    def test__kernel_size_limit_exceeded(self):
        kernel_size = (1, 4097)  # Exceeds the kernel size limit.
        input_shape = (1, 4) + kernel_size
        model = MaxPool2dModule(kernel_size)
        self.assert_not_delegated(model, input_shape)

    def test__stride_limit__no_padding(self):
        stride = 4096  # Exactly at the stride limit.
        input_shape = (1, 4, 1, 4096)
        model = MaxPool2dModule(1, stride=stride)
        self.assert_delegated(model, input_shape)

    def test__stride_limit_exceeded__no_padding(self):
        stride = 4097  # Exceeds the stride limit.
        input_shape = (1, 4, 1, 4096)
        model = MaxPool2dModule(1, stride=stride)
        self.assert_not_delegated(model, input_shape)

    def test__stride_limit__padding(self):
        padding = 1
        stride = 4096  # Exactly at the stride limit.
        input_shape = (1, 2, 3, stride)
        model = MaxPool2dModule(3, stride=stride, padding=padding)
        self.assert_delegated(model, input_shape)

    def test__stride_limit_exceeded__padding(self):
        padding = 1
        stride = 4097  # Exceeds the stride limit.
        input_shape = (1, 2, 3, stride)
        model = MaxPool2dModule(3, stride=stride, padding=padding)
        self.assert_not_delegated(model, input_shape)

    @pytest.mark.skip(
        reason="Large padding requires large kernel size which results in an extremely slow test."
    )
    def test__padding_limit(self):
        # As the padding is added via a `Pad` operator (not the `AvgPool` arguments), there is no limit to the padded
        # value. But as padding can be at most half of the kernel size (PyTorch requirement) and kernel size is limited
        # to 4096, padding of 2048 is the limit.
        # NOTE(review): the `AvgPool` mention above looks copy-pasted from the AvgPool test suite — confirm it
        #  shouldn't read `MaxPool` here.
        padding = 2048
        kernel_size = padding * 2
        input_shape = (1, 1, 2, 3)
        model = MaxPool2dModule(kernel_size, padding=padding)
        self.assert_delegated(model, input_shape)

    def test__padding__avg_pool_limit_exceeded(self):
        # NeutronIR `AvgPool` padding is limited to 32. But as it is added by the `Pad` operator instead, there is no
        # limit. This test ensures the `AvgPool` padding limit is not a problem.
        padding = 33
        kernel_size = padding * 2
        input_shape = (1, 2, 3, 4)
        model = MaxPool2dModule(kernel_size, padding=padding)
        self.assert_delegated(model, input_shape)

    def test__padding_to_kernel_ratio_exceeded(self):
        # Both PyTorch and Neutron require the padding to be at most half of the kernel size.
        kernel_size = 3
        padding = 2  # More than half of the kernel size.
        input_shape = (1, 2, 3, 4)
        model = MaxPool2dModule(kernel_size, padding=padding)
        # PyTorch itself rejects this configuration during export, so delegation is never reached.
        with pytest.raises(
            RuntimeError, match="pad should be at most half of effective kernel size"
        ):
            to_quantized_edge_program(model, input_shape, use_new_flow_neutron_c=True)
0 commit comments