diff --git a/backends/arm/test/ops/test_add.py b/backends/arm/test/ops/test_add.py index 321a549931b..367a1d74599 100644 --- a/backends/arm/test/ops/test_add.py +++ b/backends/arm/test/ops/test_add.py @@ -339,6 +339,46 @@ def test_add_conv_residual_u85_INT(test_data: input_t1): pipeline.run() +class AddDualConv(torch.nn.Module): + """conv1(x) + conv2(x) — both inputs have Rescale producers.""" + + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 1, bias=False) + self.conv2 = torch.nn.Conv2d(3, 3, 1, bias=False) + + def forward(self, x): + return self.conv1(x) + self.conv2(x) + + test_data = { + "4d_randn": lambda: (torch.randn(1, 3, 4, 4),), + } + + +@common.parametrize("test_data", AddDualConv.test_data) +def test_add_dual_conv_tosa_INT(test_data: input_t1): + pipeline = TosaPipelineINT[input_t1](AddDualConv(), test_data(), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", AddDualConv.test_data) +@common.XfailIfNoCorstone300 +def test_add_dual_conv_u55_INT(test_data: input_t1): + pipeline = EthosU55PipelineINT[input_t1]( + AddDualConv(), test_data(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_data", AddDualConv.test_data) +@common.XfailIfNoCorstone320 +def test_add_dual_conv_u85_INT(test_data: input_t1): + pipeline = EthosU85PipelineINT[input_t1]( + AddDualConv(), test_data(), aten_op, exir_op + ) + pipeline.run() + + @common.parametrize("test_data", Add.test_data) def test_add_tensor_tosa_INT_16a8w(test_data: input_t1): """Test add operation with 16A8W quantization (16-bit activations, 8-bit diff --git a/backends/arm/test/ops/test_sub.py b/backends/arm/test/ops/test_sub.py index 3da96096549..4cce10a9974 100644 --- a/backends/arm/test/ops/test_sub.py +++ b/backends/arm/test/ops/test_sub.py @@ -354,6 +354,46 @@ def test_sub_conv_residual_u85_INT(test_data: input_t1): pipeline.run() +class SubDualConv(torch.nn.Module): + """conv1(x) - conv2(x) — both inputs have Rescale producers.""" + + 
def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 1, bias=False) + self.conv2 = torch.nn.Conv2d(3, 3, 1, bias=False) + + def forward(self, x): + return self.conv1(x) - self.conv2(x) + + test_data = { + "4d_randn": lambda: (torch.randn(1, 3, 4, 4),), + } + + +@common.parametrize("test_data", SubDualConv.test_data) +def test_sub_dual_conv_tosa_INT(test_data: input_t1): + pipeline = TosaPipelineINT[input_t1](SubDualConv(), test_data(), aten_op, exir_op) + pipeline.run() + + +@common.parametrize("test_data", SubDualConv.test_data) +@common.XfailIfNoCorstone300 +def test_sub_dual_conv_u55_INT(test_data: input_t1): + pipeline = EthosU55PipelineINT[input_t1]( + SubDualConv(), test_data(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_data", SubDualConv.test_data) +@common.XfailIfNoCorstone320 +def test_sub_dual_conv_u85_INT(test_data: input_t1): + pipeline = EthosU85PipelineINT[input_t1]( + SubDualConv(), test_data(), aten_op, exir_op + ) + pipeline.run() + + @common.parametrize("test_data", sub_test_data) def test_sub_tensor_16a8w_tosa_INT(test_data: input_t1): """Test sub operation with 16A8W quantization (16-bit activations, 8-bit