Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 34 additions & 0 deletions backends/xnnpack/test/models/deeplab_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,37 @@ def forward(self, *args):
return self.m(*args)["out"]


class DynamicDL3Wrapper(torch.nn.Module):
    """DeepLabV3-ResNet50 wrapper that accepts variable spatial input sizes.

    Incoming tensors are bilinearly resized to a fixed 224x224 before being
    fed to the wrapped segmentation model; only the "out" head is returned.
    """

    def __init__(self):
        super().__init__()
        self.m = deeplabv3_resnet50(
            weights=deeplabv3.DeepLabV3_ResNet50_Weights.DEFAULT
        )

    def forward(self, x):
        # Normalize any incoming spatial size to the fixed 224x224 the
        # wrapped model is exercised with.
        resized = torch.nn.functional.interpolate(
            x,
            size=(224, 224),
            mode="bilinear",
            align_corners=True,
            antialias=False,
        )
        result = self.m(resized)
        return result["out"]


class TestDeepLabV3(unittest.TestCase):
def setUp(self):
    # Reset dynamo caches so each test exports/compiles from a clean state.
    torch._dynamo.reset()

# Shared fixtures (class attributes, accessed via self.* by the tests):
# a DL3 wrapper in eval mode, one static example input, and a
# dynamic_shapes spec marking dims 2/3 (height/width) as symbolic.
dl3 = DL3Wrapper()
dl3 = dl3.eval()
model_inputs = (torch.randn(1, 3, 224, 224),)
dynamic_shapes = (
    {
        2: torch.export.Dim("height", min=224, max=455),
        3: torch.export.Dim("width", min=224, max=455),
    },
)

def test_fp32_dl3(self):

Expand All @@ -40,3 +64,13 @@ def test_fp32_dl3(self):
.serialize()
.run_method_and_compare_outputs()
)

def test_fp32_dl3_dynamic(self):
    """Export the dynamic-shape DL3 wrapper end-to-end and compare outputs."""
    tester = Tester(DynamicDL3Wrapper(), self.model_inputs, self.dynamic_shapes)
    tester = tester.export()
    tester = tester.to_edge_transform_and_lower()
    tester = tester.to_executorch()
    tester = tester.serialize()
    tester.run_method_and_compare_outputs()
32 changes: 32 additions & 0 deletions backends/xnnpack/test/models/edsr.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,34 @@
from torchsr.models import edsr_r16f64


class DynamicEDSR(torch.nn.Module):
    """EDSR super-resolution wrapper that tolerates variable input sizes.

    The input is bilinearly resized to a fixed 224x224 before running the
    wrapped edsr_r16f64 model.
    """

    def __init__(self):
        super().__init__()
        self.model = edsr_r16f64(2, False).eval()

    def forward(self, x):
        # Pin the spatial size so the wrapped model always sees 224x224.
        resized = torch.nn.functional.interpolate(
            x,
            size=(224, 224),
            mode="bilinear",
            align_corners=True,
            antialias=False,
        )
        return self.model(resized)


class TestEDSR(unittest.TestCase):
def setUp(self):
    # Reset dynamo caches so each test exports/compiles from a clean state.
    torch._dynamo.reset()

# Shared fixtures (class attributes, accessed via self.* by the tests):
# an EDSR model in eval mode, one static example input, and a
# dynamic_shapes spec marking dims 2/3 (height/width) as symbolic.
edsr = edsr_r16f64(2, False).eval() # noqa
model_inputs = (torch.randn(1, 3, 224, 224),)
dynamic_shapes = (
    {
        2: torch.export.Dim("height", min=224, max=455),
        3: torch.export.Dim("width", min=224, max=455),
    },
)

def test_fp32_edsr(self):
(
Expand Down Expand Up @@ -53,3 +75,13 @@ def test_qs8_edsr_no_calibrate(self):
.serialize()
.run_method_and_compare_outputs()
)

def test_fp32_edsr_dynamic(self):
    """Export the dynamic-shape EDSR wrapper end-to-end and compare outputs."""
    tester = Tester(DynamicEDSR(), self.model_inputs, self.dynamic_shapes)
    tester = tester.export()
    tester = tester.to_edge_transform_and_lower()
    tester = tester.to_executorch()
    tester = tester.serialize()
    tester.run_method_and_compare_outputs()
38 changes: 38 additions & 0 deletions backends/xnnpack/test/models/emformer_rnnt.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,24 @@ def test_fp32_emformer_joiner(self):
.run_method_and_compare_outputs()
)

def test_fp32_emformer_joiner_dynamic(self):
    """Lower the Emformer joiner with a dynamic batch dimension and
    verify delegated execution matches eager outputs."""
    joiner = self.Joiner()
    # Reuse a single Dim object for both batched inputs: distinct Dim
    # objects with the same name can be rejected by torch.export as
    # conflicting definitions, and a shared object is what actually
    # expresses the "both batch sizes are equal" constraint.
    batch = torch.export.Dim("batch", min=1, max=4)
    dynamic_shapes = (
        {0: batch},
        None,
        {0: batch},
        None,
    )
    (
        Tester(joiner, joiner.get_example_inputs(), dynamic_shapes=dynamic_shapes)
        .export()
        .to_edge_transform_and_lower()
        .check(["torch.ops.higher_order.executorch_call_delegate"])
        .to_executorch()
        .serialize()
        .run_method_and_compare_outputs()
    )

class Predictor(EmformerRnnt):
    """Exposes only the RNN-T prediction network, with the third argument
    of predict() fixed to None so export sees a two-input forward."""

    def forward(self, a, b):
        prediction = self.rnnt.predict(a, b, None)
        return prediction
Expand Down Expand Up @@ -96,3 +114,23 @@ def test_fp32_emformer_transcriber(self):
.serialize()
.run_method_and_compare_outputs()
)

def test_fp32_emformer_transcriber_dynamic(self):
    """Lower the Emformer transcriber with a dynamic batch dimension and
    verify delegated execution matches eager outputs."""
    transcriber = self.Transcriber()
    batch_dim = torch.export.Dim("batch", min=1, max=4)
    dynamic_shapes = ({0: batch_dim}, None)
    tester = Tester(
        transcriber,
        transcriber.get_example_inputs(),
        dynamic_shapes=dynamic_shapes,
    )
    (
        tester.export()
        .to_edge_transform_and_lower()
        .check(["torch.ops.higher_order.executorch_call_delegate"])
        .to_executorch()
        .serialize()
        .run_method_and_compare_outputs()
    )
33 changes: 33 additions & 0 deletions backends/xnnpack/test/models/inception_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,34 @@
from torchvision import models


class DynamicInceptionV3(torch.nn.Module):
    """Inception-v3 wrapper that accepts variable spatial input sizes.

    Inputs are bilinearly resized to a fixed 224x224 before being fed to
    the wrapped classifier.
    """

    def __init__(self):
        super().__init__()
        self.model = models.inception_v3(weights="IMAGENET1K_V1").eval()

    def forward(self, x):
        # Pin the spatial size so the wrapped model always sees 224x224.
        resized = torch.nn.functional.interpolate(
            x,
            size=(224, 224),
            mode="bilinear",
            align_corners=True,
            antialias=False,
        )
        return self.model(resized)


class TestInceptionV3(unittest.TestCase):
def setUp(self):
    # Reset dynamo caches so each test exports/compiles from a clean state.
    torch._dynamo.reset()

# Shared fixtures (class attributes, accessed via self.* by the tests):
# an Inception-v3 in eval mode, one static example input, and a
# dynamic_shapes spec marking dims 2/3 (height/width) as symbolic.
ic3 = models.inception_v3(weights="IMAGENET1K_V1").eval() # noqa
model_inputs = (torch.randn(1, 3, 224, 224),)
dynamic_shapes = (
    {
        2: torch.export.Dim("height", min=224, max=455),
        3: torch.export.Dim("width", min=224, max=455),
    },
)

all_operators = {
"executorch_exir_dialects_edge__ops_aten_addmm_default",
Expand Down Expand Up @@ -82,3 +104,14 @@ def test_qs8_ic3_no_calibration(self):
.serialize()
.run_method_and_compare_outputs()
)

def test_fp32_ic3_dynamic(self):
    """Export the dynamic-shape Inception-v3 wrapper, check delegation,
    and compare outputs."""
    tester = Tester(DynamicInceptionV3(), self.model_inputs, self.dynamic_shapes)
    tester = tester.export()
    tester = tester.to_edge_transform_and_lower()
    tester = tester.check(["torch.ops.higher_order.executorch_call_delegate"])
    tester = tester.to_executorch()
    tester = tester.serialize()
    tester.run_method_and_compare_outputs()
33 changes: 33 additions & 0 deletions backends/xnnpack/test/models/inception_v4.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,34 @@
from timm.models import inception_v4


class DynamicInceptionV4(torch.nn.Module):
    """Inception-v4 wrapper that accepts variable spatial input sizes.

    Inputs are bilinearly resized to a fixed 299x299 before being fed to
    the wrapped classifier.
    """

    def __init__(self):
        super().__init__()
        self.model = inception_v4(pretrained=False).eval()

    def forward(self, x):
        # Pin the spatial size so the wrapped model always sees 299x299.
        resized = torch.nn.functional.interpolate(
            x,
            size=(299, 299),
            mode="bilinear",
            align_corners=True,
            antialias=False,
        )
        return self.model(resized)


class TestInceptionV4(unittest.TestCase):
def setUp(self):
    # Reset dynamo caches so each test exports/compiles from a clean state.
    torch._dynamo.reset()

# Shared fixtures (class attributes, accessed via self.* by the tests):
# an untrained Inception-v4 in eval mode, one static 299x299 example
# input, and a dynamic_shapes spec marking dims 2/3 as symbolic.
ic4 = inception_v4(pretrained=False).eval()
model_inputs = (torch.randn(3, 299, 299).unsqueeze(0),)
dynamic_shapes = (
    {
        2: torch.export.Dim("height", min=299, max=455),
        3: torch.export.Dim("width", min=299, max=455),
    },
)

all_operators = {
"executorch_exir_dialects_edge__ops_aten_addmm_default",
Expand Down Expand Up @@ -60,3 +82,14 @@ def test_qs8_ic4(self):
.serialize()
.run_method_and_compare_outputs()
)

def test_fp32_ic4_dynamic(self):
    """Export the dynamic-shape Inception-v4 wrapper, check delegation,
    and compare outputs."""
    tester = Tester(DynamicInceptionV4(), self.model_inputs, self.dynamic_shapes)
    tester = tester.export()
    tester = tester.to_edge_transform_and_lower()
    tester = tester.check(["torch.ops.higher_order.executorch_call_delegate"])
    tester = tester.to_executorch()
    tester = tester.serialize()
    tester.run_method_and_compare_outputs()
17 changes: 17 additions & 0 deletions backends/xnnpack/test/models/mobilebert.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ def setUp(self):
"executorch_exir_dialects_edge__ops_aten_constant_pad_nd_default",
}

# Dim 1 of the single model input is exported as a symbolic sequence
# length constrained to [2, 32] for the dynamic-shape test below.
dynamic_shapes = ({1: torch.export.Dim("seq_length", min=2, max=32)},)

def test_fp32_mobilebert(self):
(
Tester(self.mobilebert, self.example_inputs)
Expand All @@ -53,3 +55,18 @@ def test_qs8_mobilebert(self):
.serialize()
.run_method_and_compare_outputs(inputs=self.example_inputs)
)

def test_fp32_mobilebert_dynamic(self):
    """Export MobileBERT with a dynamic sequence length, check delegation,
    and compare outputs against eager on the example inputs."""
    tester = Tester(
        self.mobilebert,
        self.example_inputs,
        dynamic_shapes=self.dynamic_shapes,
    )
    tester = tester.export()
    tester = tester.to_edge_transform_and_lower()
    tester = tester.check(["torch.ops.higher_order.executorch_call_delegate"])
    tester = tester.to_executorch()
    tester = tester.serialize()
    tester.run_method_and_compare_outputs(inputs=self.example_inputs)
Loading